/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"

static struct kmem_cache *btrfs_ordered_extent_cache;
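
/*
 * return the end of the byte range covered by an ordered extent,
 * saturating to (u64)-1 if file_offset + len would overflow
 */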
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
        if (entry->file_offset + entry->len < entry->file_offset)
                return (u64)-1;
        return entry->file_offset + entry->len;
}

/*
 * returns NULL if the insertion worked, or the node already occupying
 * this offset in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_ordered_extent *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

                if (file_offset < entry->file_offset)
                        p = &(*p)->rb_left;
                else if (file_offset >= entry_end(entry))
                        p = &(*p)->rb_right;
                else
                        return parent;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}

static void ordered_data_tree_panic(struct inode *inode, int errno,
                                    u64 offset)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

        btrfs_panic(fs_info, errno,
                    "Inconsistency in ordered tree at offset %llu", offset);
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
                                     struct rb_node **prev_ret)
{
        struct rb_node *n = root->rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *test;
        struct btrfs_ordered_extent *entry;
        struct btrfs_ordered_extent *prev_entry = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
                prev = n;
                prev_entry = entry;

                if (file_offset < entry->file_offset)
                        n = n->rb_left;
                else if (file_offset >= entry_end(entry))
                        n = n->rb_right;
                else
                        return n;
        }
        if (!prev_ret)
                return NULL;
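
        /*
         * No exact match: prev points at the last node visited, which may
         * sit on either side of file_offset.  Walk forward past entries
         * that end at or before file_offset, then walk back to the first
         * entry that still ends after it.
         */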
        while (prev && file_offset >= entry_end(prev_entry)) {
                test = rb_next(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                if (file_offset < entry_end(prev_entry))
                        break;

                prev = test;
        }
        if (prev)
                prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
                                      rb_node);
        while (prev && file_offset < entry_end(prev_entry)) {
                test = rb_prev(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                prev = test;
        }
        *prev_ret = prev;
        return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
        if (file_offset < entry->file_offset ||
            entry->file_offset + entry->len <= file_offset)
                return 0;
        return 1;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
                          u64 len)
{
        if (file_offset + len <= entry->file_offset ||
            entry->file_offset + entry->len <= file_offset)
                return 0;
        return 1;
}

/*
 * find the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
                                          u64 file_offset)
{
        struct rb_root *root = &tree->tree;
        struct rb_node *prev = NULL;
        struct rb_node *ret;
        struct btrfs_ordered_extent *entry;
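
        /*
         * tree->last caches the node touched by the previous lookup;
         * checking it first avoids a full tree descent for repeated
         * operations on the same ordered extent.
         */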
        if (tree->last) {
                entry = rb_entry(tree->last, struct btrfs_ordered_extent,
                                 rb_node);
                if (offset_in_entry(entry, file_offset))
                        return tree->last;
        }
        ret = __tree_search(root, file_offset, &prev);
        if (!ret)
                ret = prev;
        if (ret)
                tree->last = ret;
        return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
                                      u64 start, u64 len, u64 disk_len,
                                      int type, int dio, int compress_type)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry;

        tree = &BTRFS_I(inode)->ordered_tree;
        entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
        if (!entry)
                return -ENOMEM;

        entry->file_offset = file_offset;
        entry->start = start;
        entry->len = len;
        entry->disk_len = disk_len;
        entry->bytes_left = len;
        entry->inode = igrab(inode);
        entry->compress_type = compress_type;
        entry->truncated_len = (u64)-1;
        if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
                set_bit(type, &entry->flags);

        if (dio)
                set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

        /* one ref for the tree */
        atomic_set(&entry->refs, 1);
        init_waitqueue_head(&entry->wait);
        INIT_LIST_HEAD(&entry->list);
        INIT_LIST_HEAD(&entry->root_extent_list);
        INIT_LIST_HEAD(&entry->work_list);
        init_completion(&entry->completion);
        INIT_LIST_HEAD(&entry->log_list);
        INIT_LIST_HEAD(&entry->trans_list);

        trace_btrfs_ordered_extent_add(inode, entry);

        spin_lock_irq(&tree->lock);
        node = tree_insert(&tree->tree, file_offset,
                           &entry->rb_node);
        if (node)
                ordered_data_tree_panic(inode, -EEXIST, file_offset);
        spin_unlock_irq(&tree->lock);
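
        /*
         * Hook the new extent into the per-root list as well, so that
         * transaction commit and balance can find every in-flight ordered
         * extent without walking each inode's tree.
         */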
        spin_lock(&root->ordered_extent_lock);
        list_add_tail(&entry->root_extent_list,
                      &root->ordered_extents);
        root->nr_ordered_extents++;
        if (root->nr_ordered_extents == 1) {
                spin_lock(&fs_info->ordered_root_lock);
                BUG_ON(!list_empty(&root->ordered_root));
                list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
                spin_unlock(&fs_info->ordered_root_lock);
        }
        spin_unlock(&root->ordered_extent_lock);

        return 0;
}
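
/*
 * The three wrappers below cover the buffered write, direct IO and
 * compressed write paths; they differ only in the dio and compress_type
 * arguments passed down.
 */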
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
                             u64 start, u64 len, u64 disk_len, int type)
{
        return __btrfs_add_ordered_extent(inode, file_offset, start, len,
                                          disk_len, type, 0,
                                          BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
                                 u64 start, u64 len, u64 disk_len, int type)
{
        return __btrfs_add_ordered_extent(inode, file_offset, start, len,
                                          disk_len, type, 1,
                                          BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
                                      u64 start, u64 len, u64 disk_len,
                                      int type, int compress_type)
{
        return __btrfs_add_ordered_extent(inode, file_offset, start, len,
                                          disk_len, type, 0,
                                          compress_type);
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct inode *inode,
                           struct btrfs_ordered_extent *entry,
                           struct btrfs_ordered_sum *sum)
{
        struct btrfs_ordered_inode_tree *tree;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        list_add_tail(&sum->list, &entry->list);
        spin_unlock_irq(&tree->lock);
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
                                   struct btrfs_ordered_extent **cached,
                                   u64 *file_offset, u64 io_size, int uptodate)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        int ret;
        unsigned long flags;
        u64 dec_end;
        u64 dec_start;
        u64 to_dec;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irqsave(&tree->lock, flags);
        node = tree_search(tree, *file_offset);
        if (!node) {
                ret = 1;
                goto out;
        }

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        if (!offset_in_entry(entry, *file_offset)) {
                ret = 1;
                goto out;
        }
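
        /*
         * The completed IO may extend past this ordered extent; clamp the
         * range accounted here to the extent's own boundaries and report
         * the clamped end back through *file_offset so the caller can
         * continue with the next extent.
         */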
        dec_start = max(*file_offset, entry->file_offset);
        dec_end = min(*file_offset + io_size, entry->file_offset +
                      entry->len);
        *file_offset = dec_end;
        if (dec_start > dec_end) {
                btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
                           dec_start, dec_end);
        }
        to_dec = dec_end - dec_start;
        if (to_dec > entry->bytes_left) {
                btrfs_crit(fs_info,
                           "bad ordered accounting left %llu size %llu",
                           entry->bytes_left, to_dec);
        }
        entry->bytes_left -= to_dec;
        if (!uptodate)
                set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

        if (entry->bytes_left == 0) {
                ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
                /*
                 * Implicit memory barrier after test_and_set_bit
                 */
                if (waitqueue_active(&entry->wait))
                        wake_up(&entry->wait);
        } else {
                ret = 1;
        }
out:
        if (!ret && cached && entry) {
                *cached = entry;
                atomic_inc(&entry->refs);
        }
        spin_unlock_irqrestore(&tree->lock, flags);
        return ret == 0;
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
                                   struct btrfs_ordered_extent **cached,
                                   u64 file_offset, u64 io_size, int uptodate)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        unsigned long flags;
        int ret;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irqsave(&tree->lock, flags);
        if (cached && *cached) {
                entry = *cached;
                goto have_entry;
        }

        node = tree_search(tree, file_offset);
        if (!node) {
                ret = 1;
                goto out;
        }

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
        if (!offset_in_entry(entry, file_offset)) {
                ret = 1;
                goto out;
        }

        if (io_size > entry->bytes_left) {
                btrfs_crit(BTRFS_I(inode)->root->fs_info,
                           "bad ordered accounting left %llu size %llu",
                           entry->bytes_left, io_size);
        }
        entry->bytes_left -= io_size;
        if (!uptodate)
                set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

        if (entry->bytes_left == 0) {
                ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
                /*
                 * Implicit memory barrier after test_and_set_bit
                 */
                if (waitqueue_active(&entry->wait))
                        wake_up(&entry->wait);
        } else {
                ret = 1;
        }
out:
        if (!ret && cached && entry) {
                *cached = entry;
                atomic_inc(&entry->refs);
        }
        spin_unlock_irqrestore(&tree->lock, flags);
        return ret == 0;
}

/* Needs to either be called under a log transaction or the log_mutex */
void btrfs_get_logged_extents(struct inode *inode,
                              struct list_head *logged_list,
                              const loff_t start,
                              const loff_t end)
{
        struct btrfs_ordered_inode_tree *tree;
        struct btrfs_ordered_extent *ordered;
        struct rb_node *n;
        struct rb_node *prev;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        n = __tree_search(&tree->tree, end, &prev);
        if (!n)
                n = prev;
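
        /*
         * Start at the entry found for 'end' and walk backwards, so every
         * ordered extent overlapping [start, end] is visited exactly once.
         */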
        for (; n; n = rb_prev(n)) {
                ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
                if (ordered->file_offset > end)
                        continue;
                if (entry_end(ordered) <= start)
                        break;
                if (test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
                        continue;
                list_add(&ordered->log_list, logged_list);
                atomic_inc(&ordered->refs);
        }
        spin_unlock_irq(&tree->lock);
}

void btrfs_put_logged_extents(struct list_head *logged_list)
{
        struct btrfs_ordered_extent *ordered;

        while (!list_empty(logged_list)) {
                ordered = list_first_entry(logged_list,
                                           struct btrfs_ordered_extent,
                                           log_list);
                list_del_init(&ordered->log_list);
                btrfs_put_ordered_extent(ordered);
        }
}

void btrfs_submit_logged_extents(struct list_head *logged_list,
                                 struct btrfs_root *log)
{
        int index = log->log_transid % 2;

        spin_lock_irq(&log->log_extents_lock[index]);
        list_splice_tail(logged_list, &log->logged_list[index]);
        spin_unlock_irq(&log->log_extents_lock[index]);
}

void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
                               struct btrfs_root *log, u64 transid)
{
        struct btrfs_ordered_extent *ordered;
        int index = transid % 2;

        spin_lock_irq(&log->log_extents_lock[index]);
        while (!list_empty(&log->logged_list[index])) {
                struct inode *inode;
                ordered = list_first_entry(&log->logged_list[index],
                                           struct btrfs_ordered_extent,
                                           log_list);
                list_del_init(&ordered->log_list);
                inode = ordered->inode;
                spin_unlock_irq(&log->log_extents_lock[index]);
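
                /*
                 * If the extent's pages were never flushed (e.g. the fsync
                 * range didn't cover them), start writeback here ourselves;
                 * otherwise the wait below can block until background
                 * writeback finally gets around to those pages.
                 */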
                if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
                    !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
                        u64 start = ordered->file_offset;
                        u64 end = ordered->file_offset + ordered->len - 1;

                        WARN_ON(!inode);
                        filemap_fdatawrite_range(inode->i_mapping, start, end);
                }
                wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
                                                   &ordered->flags));

                /*
                 * In order to keep us from losing our ordered extent
                 * information when committing the transaction we have to make
                 * sure that any logged extents are completed when we go to
                 * commit the transaction.  To do this we simply increase the
                 * current transaction's pending_ordered counter and decrement
                 * it when the ordered extent completes.
                 */
                if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
                        struct btrfs_ordered_inode_tree *tree;

                        tree = &BTRFS_I(inode)->ordered_tree;
                        spin_lock_irq(&tree->lock);
                        if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
                                set_bit(BTRFS_ORDERED_PENDING, &ordered->flags);
                                atomic_inc(&trans->transaction->pending_ordered);
                        }
                        spin_unlock_irq(&tree->lock);
                }
                btrfs_put_ordered_extent(ordered);
                spin_lock_irq(&log->log_extents_lock[index]);
        }
        spin_unlock_irq(&log->log_extents_lock[index]);
}

void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
{
        struct btrfs_ordered_extent *ordered;
        int index = transid % 2;

        spin_lock_irq(&log->log_extents_lock[index]);
        while (!list_empty(&log->logged_list[index])) {
                ordered = list_first_entry(&log->logged_list[index],
                                           struct btrfs_ordered_extent,
                                           log_list);
                list_del_init(&ordered->log_list);
                spin_unlock_irq(&log->log_extents_lock[index]);
                btrfs_put_ordered_extent(ordered);
                spin_lock_irq(&log->log_extents_lock[index]);
        }
        spin_unlock_irq(&log->log_extents_lock[index]);
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
        struct list_head *cur;
        struct btrfs_ordered_sum *sum;

        trace_btrfs_ordered_extent_put(entry->inode, entry);

        if (atomic_dec_and_test(&entry->refs)) {
                ASSERT(list_empty(&entry->log_list));
                ASSERT(list_empty(&entry->trans_list));
                ASSERT(list_empty(&entry->root_extent_list));
                ASSERT(RB_EMPTY_NODE(&entry->rb_node));
                if (entry->inode)
                        btrfs_add_delayed_iput(entry->inode);
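                /*
                 * Free any checksum entries still queued on this extent's
                 * list; nothing will consume them after the last ref drops.
                 */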
                while (!list_empty(&entry->list)) {
                        cur = entry->list.next;
                        sum = list_entry(cur, struct btrfs_ordered_sum, list);
                        list_del(&sum->list);
                        kfree(sum);
                }
                kmem_cache_free(btrfs_ordered_extent_cache, entry);
        }
}

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
                                 struct btrfs_ordered_extent *entry)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_ordered_inode_tree *tree;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct rb_node *node;
        bool dec_pending_ordered = false;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = &entry->rb_node;
        rb_erase(node, &tree->tree);
        RB_CLEAR_NODE(node);
        if (tree->last == node)
                tree->last = NULL;
        set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
        if (test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags))
                dec_pending_ordered = true;
        spin_unlock_irq(&tree->lock);

        /*
         * The current running transaction is waiting on us, we need to let it
         * know that we're complete and wake it up.
         */
        if (dec_pending_ordered) {
                struct btrfs_transaction *trans;

                /*
                 * The checks for trans are just a formality, it should be set,
                 * but if it isn't we don't want to deref/assert under the spin
                 * lock, so be nice and check if trans is set, but ASSERT() so
                 * if it isn't set a developer will notice.
                 */
                spin_lock(&fs_info->trans_lock);
                trans = fs_info->running_transaction;
                if (trans)
                        atomic_inc(&trans->use_count);
                spin_unlock(&fs_info->trans_lock);

                ASSERT(trans);
                if (trans) {
                        if (atomic_dec_and_test(&trans->pending_ordered))
                                wake_up(&trans->pending_wait);
                        btrfs_put_transaction(trans);
                }
        }

        spin_lock(&root->ordered_extent_lock);
        list_del_init(&entry->root_extent_list);
        root->nr_ordered_extents--;

        trace_btrfs_ordered_extent_remove(inode, entry);

        if (!root->nr_ordered_extents) {
                spin_lock(&fs_info->ordered_root_lock);
                BUG_ON(list_empty(&root->ordered_root));
                list_del_init(&root->ordered_root);
                spin_unlock(&fs_info->ordered_root_lock);
        }
        spin_unlock(&root->ordered_extent_lock);
        wake_up(&entry->wait);
}
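
/*
 * Work callback used by btrfs_wait_ordered_extents() below: force a single
 * ordered extent's pages to disk and signal the completion the waiter is
 * blocked on.
 */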
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
        struct btrfs_ordered_extent *ordered;

        ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
        btrfs_start_ordered_extent(ordered->inode, ordered, 1);
        complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
                               const u64 range_start, const u64 range_len)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        LIST_HEAD(splice);
        LIST_HEAD(skipped);
        LIST_HEAD(works);
        struct btrfs_ordered_extent *ordered, *next;
        int count = 0;
        const u64 range_end = range_start + range_len;

        mutex_lock(&root->ordered_extent_mutex);
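        /*
         * Splice the whole list off the root so extents added while we
         * sleep below don't keep this loop running forever.
         */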
        spin_lock(&root->ordered_extent_lock);
        list_splice_init(&root->ordered_extents, &splice);
        while (!list_empty(&splice) && nr) {
                ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
                                           root_extent_list);
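
                /*
                 * Outside the requested disk byte range: park it on the
                 * skipped list so it is spliced back afterwards.
                 */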
|
2016-04-26 21:36:38 +07:00
|
|
|
|
|
|
|
if (range_end <= ordered->start ||
|
|
|
|
ordered->start + ordered->disk_len <= range_start) {
|
|
|
|
list_move_tail(&ordered->root_extent_list, &skipped);
|
|
|
|
cond_resched_lock(&root->ordered_extent_lock);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2013-05-15 14:48:23 +07:00
|
|
|
list_move_tail(&ordered->root_extent_list,
|
|
|
|
&root->ordered_extents);
|
|
|
|
atomic_inc(&ordered->refs);
|
|
|
|
spin_unlock(&root->ordered_extent_lock);
|
2008-07-24 22:57:52 +07:00
|
|
|
|
2014-02-28 09:46:09 +07:00
|
|
|
btrfs_init_work(&ordered->flush_work,
|
				btrfs_flush_delalloc_helper,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != -1)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);
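
	/*
	 * All flushes are queued now; wait for each one to complete and
	 * drop the reference we took while holding the lock.
	 */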
	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}
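
/*
 * Wait for ordered extents on every root tracked in fs_info->ordered_roots,
 * up to nr extents in total (nr == -1 means no limit), skipping extents
 * that do not intersect [range_start, range_start + range_len).
 */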
int btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
			     const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	struct list_head splice;
	int done;
	int total_done = 0;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_fs_root(root);
		total_done += done;

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != -1) {
			nr -= done;
			WARN_ON(nr < 0);
		}
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);

	return total_done;
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (ret || end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}
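
/*
 * Example (sketch): callers that need every ordered extent of a file
 * finished, such as the eviction path, flush and wait on the whole range
 * with
 *
 *	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
 *
 * the start + len overflow check above then clamps the end of the range
 * to INT_LIMIT(loff_t).
 */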

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}
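
/*
 * Note: the lookup helpers in this file return with entry->refs elevated;
 * callers are expected to drop that reference with
 * btrfs_put_ordered_extent().
 */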

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
							u64 file_offset,
							u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		atomic_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}
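
/*
 * Return true if any ordered extent overlaps the given file range; drops
 * the reference taken by the range lookup before returning.
 */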
bool btrfs_have_ordered_extents_in_range(struct inode *inode,
					 u64 file_offset,
					 u64 len)
{
	struct btrfs_ordered_extent *oe;

	oe = btrfs_lookup_ordered_range(inode, file_offset, len);
	if (oe) {
		btrfs_put_ordered_extent(oe);
		return true;
	}
	return false;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size. i_size is updated to cover any fully written part of the file.
 */
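
/*
 * Worked example (informal): suppose i_size is 1M, disk_i_size is 0, and
 * ordered extents [0, 256K), [256K, 512K), [512K, 1M) complete out of
 * order.  When [0, 256K) finishes, disk_i_size advances to 256K.  If
 * [512K, 1M) finishes next, the backward walk below hits the still
 * pending [256K, 512K), records 1M in its ->outstanding_isize and bails.
 * When [256K, 512K) finally completes, that recorded value lets
 * disk_i_size jump straight to 1M.
 */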
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;
	u64 orig_offset = offset;

	spin_lock_irq(&tree->lock);
	if (ordered) {
		offset = entry_end(ordered);
		if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags))
			offset = min(offset,
				     ordered->file_offset +
				     ordered->truncated_len);
	} else {
		offset = ALIGN(offset, btrfs_inode_sectorsize(inode));
	}
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/* truncate file */
	if (disk_i_size > i_size) {
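		/*
		 * For truncates we must use the offset the caller passed in
		 * (saved in orig_offset above) rather than inode->i_size, so
		 * that a truncate which fails mid-way can still roll
		 * disk_i_size back to the right value.
		 */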
		BTRFS_I(inode)->disk_i_size = orig_offset;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size)
		goto out;

	/*
	 * We still need to update disk_i_size if outstanding_isize is greater
	 * than disk_i_size.
	 */
	if (offset <= disk_i_size &&
	    (!ordered || ordered->outstanding_isize <= disk_i_size))
		goto out;

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct covering this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	for (; node; node = rb_prev(node)) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		/* We treat this entry as if it doesn't exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;
		if (entry_end(test) > disk_i_size) {
			/*
			 * We don't update disk_i_size now, so record this
			 * pending i_size update; otherwise we would lose
			 * track of the real i_size.
			 */
			if (test->outstanding_isize < offset)
				test->outstanding_isize = offset;
			if (ordered &&
			    ordered->outstanding_isize >
			    test->outstanding_isize)
				test->outstanding_isize =
						ordered->outstanding_isize;
			goto out;
		}
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * Some ordered extents may have completed before the current one,
	 * and we hold the real i_size in ->outstanding_isize.
	 */
	if (ordered && ordered->outstanding_isize > new_i_size)
		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * We need to do this because we can't remove ordered extents until
	 * after the i_disk_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
	 */
	if (ordered)
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
	return ret;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum, int len)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = btrfs_inode_sectorsize(inode);
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >>
			    inode->i_sb->s_blocksize_bits;
			num_sectors = ordered_sum->len >>
				      inode->i_sb->s_blocksize_bits;
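			/*
			 * Copy out as many checksums as fit in the caller's
			 * buffer, starting at the sector index of
			 * disk_bytenr inside this sum item.
			 */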
			num_sectors = min_t(int, len - index, num_sectors - i);
			memcpy(sum + index, ordered_sum->sums + i,
			       num_sectors);

			index += (int)num_sectors;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}

int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}