Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-04 02:06:43 +07:00
btrfs: migrate the delayed refs rsv code
These belong with the delayed refs related code, not in extent-tree.c.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
parent 9978059be8
commit 6ef03debdb
fs/btrfs/ctree.h
@@ -2622,8 +2622,6 @@ static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_fs_info *fs_info,
 	return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
 }
 
-int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans);
-bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);
 void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
 					const u64 start);
 void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg);
@@ -2784,13 +2782,6 @@ void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes,
 				     bool qgroup_free);
 int btrfs_delalloc_reserve_space(struct inode *inode,
 			struct extent_changeset **reserved, u64 start, u64 len);
-void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr);
-void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans);
-int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
-				  enum btrfs_reserve_flush_enum flush);
-void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
-				       struct btrfs_block_rsv *src,
-				       u64 num_bytes);
 int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache);
 void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache);
 void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
fs/btrfs/delayed-ref.c
@@ -10,6 +10,7 @@
 #include "delayed-ref.h"
 #include "transaction.h"
 #include "qgroup.h"
+#include "space-info.h"
 
 struct kmem_cache *btrfs_delayed_ref_head_cachep;
 struct kmem_cache *btrfs_delayed_tree_ref_cachep;
@@ -24,6 +25,179 @@ struct kmem_cache *btrfs_delayed_extent_op_cachep;
  * of hammering updates on the extent allocation tree.
  */
 
+bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
+{
+	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
+	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
+	bool ret = false;
+	u64 reserved;
+
+	spin_lock(&global_rsv->lock);
+	reserved = global_rsv->reserved;
+	spin_unlock(&global_rsv->lock);
+
+	/*
+	 * Since the global reserve is just kind of magic we don't really want
+	 * to rely on it to save our bacon, so if our size is more than the
+	 * delayed_refs_rsv and the global rsv then it's time to think about
+	 * bailing.
+	 */
+	spin_lock(&delayed_refs_rsv->lock);
+	reserved += delayed_refs_rsv->reserved;
+	if (delayed_refs_rsv->size >= reserved)
+		ret = true;
+	spin_unlock(&delayed_refs_rsv->lock);
+	return ret;
+}
+
+int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
+{
+	u64 num_entries =
+		atomic_read(&trans->transaction->delayed_refs.num_entries);
+	u64 avg_runtime;
+	u64 val;
+
+	smp_mb();
+	avg_runtime = trans->fs_info->avg_delayed_ref_runtime;
+	val = num_entries * avg_runtime;
+	if (val >= NSEC_PER_SEC)
+		return 1;
+	if (val >= NSEC_PER_SEC / 2)
+		return 2;
+
+	return btrfs_check_space_for_delayed_refs(trans->fs_info);
+}
+
+/**
+ * btrfs_delayed_refs_rsv_release - release a ref head's reservation.
+ * @fs_info - the fs_info for our fs.
+ * @nr - the number of items to drop.
+ *
+ * This drops the delayed ref head's count from the delayed refs rsv and frees
+ * any excess reservation we had.
+ */
+void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
+{
+	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
+	u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, nr);
+	u64 released = 0;
+
+	released = __btrfs_block_rsv_release(fs_info, block_rsv, num_bytes,
+					     NULL);
+	if (released)
+		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
+					      0, released, 0);
+}
+
+/*
+ * btrfs_update_delayed_refs_rsv - adjust the size of the delayed refs rsv
+ * @trans - the trans that may have generated delayed refs
+ *
+ * This is to be called anytime we may have adjusted trans->delayed_ref_updates,
+ * it'll calculate the additional size and add it to the delayed_refs_rsv.
+ */
+void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
+{
+	struct btrfs_fs_info *fs_info = trans->fs_info;
+	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
+	u64 num_bytes;
+
+	if (!trans->delayed_ref_updates)
+		return;
+
+	num_bytes = btrfs_calc_trans_metadata_size(fs_info,
+						   trans->delayed_ref_updates);
+	spin_lock(&delayed_rsv->lock);
+	delayed_rsv->size += num_bytes;
+	delayed_rsv->full = 0;
+	spin_unlock(&delayed_rsv->lock);
+	trans->delayed_ref_updates = 0;
+}
+
+/**
+ * btrfs_migrate_to_delayed_refs_rsv - transfer bytes to our delayed refs rsv.
+ * @fs_info - the fs info for our fs.
+ * @src - the source block rsv to transfer from.
+ * @num_bytes - the number of bytes to transfer.
+ *
+ * This transfers up to the num_bytes amount from the src rsv to the
+ * delayed_refs_rsv.  Any extra bytes are returned to the space info.
+ */
+void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
+				       struct btrfs_block_rsv *src,
+				       u64 num_bytes)
+{
+	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
+	u64 to_free = 0;
+
+	spin_lock(&src->lock);
+	src->reserved -= num_bytes;
+	src->size -= num_bytes;
+	spin_unlock(&src->lock);
+
+	spin_lock(&delayed_refs_rsv->lock);
+	if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
+		u64 delta = delayed_refs_rsv->size -
+			delayed_refs_rsv->reserved;
+		if (num_bytes > delta) {
+			to_free = num_bytes - delta;
+			num_bytes = delta;
+		}
+	} else {
+		to_free = num_bytes;
+		num_bytes = 0;
+	}
+
+	if (num_bytes)
+		delayed_refs_rsv->reserved += num_bytes;
+	if (delayed_refs_rsv->reserved >= delayed_refs_rsv->size)
+		delayed_refs_rsv->full = 1;
+	spin_unlock(&delayed_refs_rsv->lock);
+
+	if (num_bytes)
+		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
+					      0, num_bytes, 1);
+	if (to_free)
+		btrfs_space_info_add_old_bytes(fs_info,
+				delayed_refs_rsv->space_info, to_free);
+}
+
+/**
+ * btrfs_delayed_refs_rsv_refill - refill based on our delayed refs usage.
+ * @fs_info - the fs_info for our fs.
+ * @flush - control how we can flush for this reservation.
+ *
+ * This will refill the delayed block_rsv up to 1 items size worth of space and
+ * will return -ENOSPC if we can't make the reservation.
+ */
+int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
+				  enum btrfs_reserve_flush_enum flush)
+{
+	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
+	u64 limit = btrfs_calc_trans_metadata_size(fs_info, 1);
+	u64 num_bytes = 0;
+	int ret = -ENOSPC;
+
+	spin_lock(&block_rsv->lock);
+	if (block_rsv->reserved < block_rsv->size) {
+		num_bytes = block_rsv->size - block_rsv->reserved;
+		num_bytes = min(num_bytes, limit);
+	}
+	spin_unlock(&block_rsv->lock);
+
+	if (!num_bytes)
+		return 0;
+
+	ret = btrfs_reserve_metadata_bytes(fs_info->extent_root, block_rsv,
+					   num_bytes, flush);
+	if (ret)
+		return ret;
+	btrfs_block_rsv_add_bytes(block_rsv, num_bytes, 0);
+	trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
+				      0, num_bytes, 1);
+	return 0;
+}
+
 /*
  * compare two delayed tree backrefs with same bytenr and type
  */
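For orientation, a minimal sketch (not part of the commit) of how the pair btrfs_update_delayed_refs_rsv() / btrfs_delayed_refs_rsv_release() is meant to be used: the former folds trans->delayed_ref_updates into the rsv's size after refs are queued, the latter gives back one item's worth of reservation per ref head actually run. The caller example_queue_and_run() is hypothetical, invented purely for illustration.

	/* Hypothetical caller, for illustration only. */
	static void example_queue_and_run(struct btrfs_trans_handle *trans)
	{
		struct btrfs_fs_info *fs_info = trans->fs_info;

		/*
		 * ... queue delayed refs here; each queued ref head bumps
		 * trans->delayed_ref_updates ...
		 */

		/* Grow delayed_refs_rsv->size by the queued item count. */
		btrfs_update_delayed_refs_rsv(trans);

		/*
		 * ... later, once a ref head has been processed, release its
		 * single item's worth of metadata reservation ...
		 */
		btrfs_delayed_refs_rsv_release(fs_info, 1);
	}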
fs/btrfs/delayed-ref.h
@@ -364,6 +364,16 @@ struct btrfs_delayed_ref_head *btrfs_select_ref_head(
 
 int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq);
 
+void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr);
+void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans);
+int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
+				  enum btrfs_reserve_flush_enum flush);
+void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
+				       struct btrfs_block_rsv *src,
+				       u64 num_bytes);
+int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans);
+bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);
+
 /*
  * helper functions to cast a node into its container
  */
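Again as a hedged sketch, not from the commit: a caller wanting to top the reserve back up might combine the throttle check with a refill, passing one of the btrfs_reserve_flush_enum values (BTRFS_RESERVE_FLUSH_LIMIT is assumed here). example_maybe_refill() is hypothetical.

	/* Hypothetical helper, for illustration only. */
	static int example_maybe_refill(struct btrfs_trans_handle *trans)
	{
		struct btrfs_fs_info *fs_info = trans->fs_info;

		/*
		 * btrfs_should_throttle_delayed_refs() returns nonzero when
		 * the delayed ref backlog is expected to take half a second
		 * or more to run, or when the reservations look short.
		 */
		if (!btrfs_should_throttle_delayed_refs(trans))
			return 0;

		/* Refill at most one item's worth, flushing conservatively. */
		return btrfs_delayed_refs_rsv_refill(fs_info,
						     BTRFS_RESERVE_FLUSH_LIMIT);
	}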
fs/btrfs/extent-tree.c
@@ -2753,49 +2753,6 @@ u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes)
 	return num_csums;
 }
 
-bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
-{
-	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
-	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
-	bool ret = false;
-	u64 reserved;
-
-	spin_lock(&global_rsv->lock);
-	reserved = global_rsv->reserved;
-	spin_unlock(&global_rsv->lock);
-
-	/*
-	 * Since the global reserve is just kind of magic we don't really want
-	 * to rely on it to save our bacon, so if our size is more than the
-	 * delayed_refs_rsv and the global rsv then it's time to think about
-	 * bailing.
-	 */
-	spin_lock(&delayed_refs_rsv->lock);
-	reserved += delayed_refs_rsv->reserved;
-	if (delayed_refs_rsv->size >= reserved)
-		ret = true;
-	spin_unlock(&delayed_refs_rsv->lock);
-	return ret;
-}
-
-int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
-{
-	u64 num_entries =
-		atomic_read(&trans->transaction->delayed_refs.num_entries);
-	u64 avg_runtime;
-	u64 val;
-
-	smp_mb();
-	avg_runtime = trans->fs_info->avg_delayed_ref_runtime;
-	val = num_entries * avg_runtime;
-	if (val >= NSEC_PER_SEC)
-		return 1;
-	if (val >= NSEC_PER_SEC / 2)
-		return 2;
-
-	return btrfs_check_space_for_delayed_refs(trans->fs_info);
-}
-
 /*
  * this starts processing the delayed reference count updates and
  * extent insertions we have queued up so far.  count can be
@@ -4347,90 +4304,6 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
 	return ret;
 }
 
-/**
- * btrfs_migrate_to_delayed_refs_rsv - transfer bytes to our delayed refs rsv.
- * @fs_info - the fs info for our fs.
- * @src - the source block rsv to transfer from.
- * @num_bytes - the number of bytes to transfer.
- *
- * This transfers up to the num_bytes amount from the src rsv to the
- * delayed_refs_rsv.  Any extra bytes are returned to the space info.
- */
-void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
-				       struct btrfs_block_rsv *src,
-				       u64 num_bytes)
-{
-	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
-	u64 to_free = 0;
-
-	spin_lock(&src->lock);
-	src->reserved -= num_bytes;
-	src->size -= num_bytes;
-	spin_unlock(&src->lock);
-
-	spin_lock(&delayed_refs_rsv->lock);
-	if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
-		u64 delta = delayed_refs_rsv->size -
-			delayed_refs_rsv->reserved;
-		if (num_bytes > delta) {
-			to_free = num_bytes - delta;
-			num_bytes = delta;
-		}
-	} else {
-		to_free = num_bytes;
-		num_bytes = 0;
-	}
-
-	if (num_bytes)
-		delayed_refs_rsv->reserved += num_bytes;
-	if (delayed_refs_rsv->reserved >= delayed_refs_rsv->size)
-		delayed_refs_rsv->full = 1;
-	spin_unlock(&delayed_refs_rsv->lock);
-
-	if (num_bytes)
-		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
-					      0, num_bytes, 1);
-	if (to_free)
-		btrfs_space_info_add_old_bytes(fs_info,
-				delayed_refs_rsv->space_info, to_free);
-}
-
-/**
- * btrfs_delayed_refs_rsv_refill - refill based on our delayed refs usage.
- * @fs_info - the fs_info for our fs.
- * @flush - control how we can flush for this reservation.
- *
- * This will refill the delayed block_rsv up to 1 items size worth of space and
- * will return -ENOSPC if we can't make the reservation.
- */
-int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
-				  enum btrfs_reserve_flush_enum flush)
-{
-	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
-	u64 limit = btrfs_calc_trans_metadata_size(fs_info, 1);
-	u64 num_bytes = 0;
-	int ret = -ENOSPC;
-
-	spin_lock(&block_rsv->lock);
-	if (block_rsv->reserved < block_rsv->size) {
-		num_bytes = block_rsv->size - block_rsv->reserved;
-		num_bytes = min(num_bytes, limit);
-	}
-	spin_unlock(&block_rsv->lock);
-
-	if (!num_bytes)
-		return 0;
-
-	ret = btrfs_reserve_metadata_bytes(fs_info->extent_root, block_rsv,
-					   num_bytes, flush);
-	if (ret)
-		return ret;
-	btrfs_block_rsv_add_bytes(block_rsv, num_bytes, 0);
-	trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
-				      0, num_bytes, 1);
-	return 0;
-}
-
 /**
  * btrfs_inode_rsv_release - release any excessive reservation.
  * @inode - the inode we need to release from.
@@ -4466,53 +4339,6 @@ static void btrfs_inode_rsv_release(struct btrfs_inode *inode, bool qgroup_free)
 				     qgroup_to_release);
 }
 
-/**
- * btrfs_delayed_refs_rsv_release - release a ref head's reservation.
- * @fs_info - the fs_info for our fs.
- * @nr - the number of items to drop.
- *
- * This drops the delayed ref head's count from the delayed refs rsv and frees
- * any excess reservation we had.
- */
-void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
-{
-	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
-	u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, nr);
-	u64 released = 0;
-
-	released = __btrfs_block_rsv_release(fs_info, block_rsv, num_bytes,
-					     NULL);
-	if (released)
-		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
-					      0, released, 0);
-}
-
-
-/*
- * btrfs_update_delayed_refs_rsv - adjust the size of the delayed refs rsv
- * @trans - the trans that may have generated delayed refs
- *
- * This is to be called anytime we may have adjusted trans->delayed_ref_updates,
- * it'll calculate the additional size and add it to the delayed_refs_rsv.
- */
-void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
-{
-	struct btrfs_fs_info *fs_info = trans->fs_info;
-	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
-	u64 num_bytes;
-
-	if (!trans->delayed_ref_updates)
-		return;
-
-	num_bytes = btrfs_calc_trans_metadata_size(fs_info,
-						   trans->delayed_ref_updates);
-	spin_lock(&delayed_rsv->lock);
-	delayed_rsv->size += num_bytes;
-	delayed_rsv->full = 0;
-	spin_unlock(&delayed_rsv->lock);
-	trans->delayed_ref_updates = 0;
-}
-
 /*
  * To be called after all the new block groups attached to the transaction
  * handle have been created (btrfs_create_pending_block_groups()).