mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-24 07:00:52 +07:00
54f290efac
commit bc0939fcfab0d7efb2ed12896b1af3d819954a14 upstream.

We have a race between marking that an inode needs to be logged, either
at btrfs_set_inode_last_trans() or at btrfs_page_mkwrite(), and between
btrfs_sync_log(). The following steps describe how the race happens.

1) We are at transaction N;

2) Inode I was previously fsynced in the current transaction so it has:

   inode->logged_trans set to N;

3) The inode's root currently has:

   root->log_transid set to 1
   root->last_log_commit set to 0

   Which means only one log transaction was committed so far, log
   transaction 0. When a log tree is created we set ->log_transid and
   ->last_log_commit of its parent root to 0 (at btrfs_add_log_tree());

4) One more range of pages is dirtied in inode I;

5) Some task A starts an fsync against some other inode J (same root),
   and so it joins log transaction 1.

   Before task A calls btrfs_sync_log()...

6) Task B starts an fsync against inode I, which currently has the full
   sync flag set, so it starts delalloc and waits for the ordered extent
   to complete before calling btrfs_inode_in_log() at btrfs_sync_file();

7) During ordered extent completion we have btrfs_update_inode() called
   against inode I, which in turn calls btrfs_set_inode_last_trans(),
   which does the following:

     spin_lock(&inode->lock);
     inode->last_trans = trans->transaction->transid;
     inode->last_sub_trans = inode->root->log_transid;
     inode->last_log_commit = inode->root->last_log_commit;
     spin_unlock(&inode->lock);

   So ->last_trans is set to N and ->last_sub_trans set to 1.

   But before setting ->last_log_commit...

8) Task A is at btrfs_sync_log():

   - it increments root->log_transid to 2
   - starts writeback for all log tree extent buffers
   - waits for the writeback to complete
   - writes the super blocks
   - updates root->last_log_commit to 1

   It's a lot of slow steps between updating root->log_transid and
   root->last_log_commit;

9) The task doing the ordered extent completion, currently at
   btrfs_set_inode_last_trans(), then finally runs:

     inode->last_log_commit = inode->root->last_log_commit;
     spin_unlock(&inode->lock);

   Which results in inode->last_log_commit being set to 1.
   The ordered extent completes;

10) Task B is resumed, and it calls btrfs_inode_in_log() which returns
    true because we have all the following conditions met:

    - inode->logged_trans == N, which matches fs_info->generation, and
    - inode->last_subtrans (1) <= inode->last_log_commit (1), and
    - inode->last_subtrans (1) <= root->last_log_commit (1), and
    - list inode->extent_tree.modified_extents is empty

    And as a consequence we return without logging the inode, so the
    existing logged version of the inode does not point to the extent
    that was written after the previous fsync.

It should be impossible in practice for one task to be able to make so
much progress in btrfs_sync_log() while another task is at
btrfs_set_inode_last_trans() right after it reads root->log_transid and
before it reads root->last_log_commit. Even if kernel preemption is
enabled, we know the task at btrfs_set_inode_last_trans() can not be
preempted, because it is holding the inode's spinlock.

However there is another place where we do the same without holding the
spinlock, which is in the memory mapped write path at:

  vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
  {
     (...)
     BTRFS_I(inode)->last_trans = fs_info->generation;
     BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
     BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
     (...)

So with preemption happening after setting ->last_sub_trans and before
setting ->last_log_commit, it is less of a stretch to have another task
make enough progress at btrfs_sync_log() such that the task doing the
memory mapped write ends up with ->last_sub_trans and ->last_log_commit
set to the same value. It is still a big stretch to get there, as the
task doing btrfs_sync_log() has to start writeback, wait for its
completion and write the super blocks.

So fix this in two different ways:

1) For btrfs_set_inode_last_trans(), simply set ->last_log_commit to the
   value of ->last_sub_trans minus 1;

2) For btrfs_page_mkwrite() only set the inode's ->last_sub_trans, just
   like we do for buffered and direct writes at btrfs_file_write_iter(),
   which is all we need to make sure multiple writes and fsyncs to an
   inode in the same transaction never result in an fsync missing that
   the inode changed and needs to be logged. Turn this into a helper
   function and use it both at btrfs_page_mkwrite() and at
   btrfs_file_write_iter() - this also fixes the problem that at
   btrfs_page_mkwrite() we were setting those fields without the
   protection of the inode's spinlock.

This is an extremely unlikely race to happen in practice.

Signed-off-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Signed-off-by: Anand Jain <anand.jain@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
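For reference, a minimal sketch of the helper described in fix 2) above,
assuming the upstream name btrfs_set_inode_last_sub_trans() (this fork's
name or placement may differ): it takes the inode's spinlock, which
btrfs_page_mkwrite() previously did not, and updates only ->last_sub_trans:

  /* Sketch of fix 2): update only ->last_sub_trans, under the inode's
   * spinlock, shared by btrfs_page_mkwrite() and btrfs_file_write_iter().
   */
  static inline void btrfs_set_inode_last_sub_trans(struct btrfs_inode *inode)
  {
          spin_lock(&inode->lock);
          inode->last_sub_trans = inode->root->log_transid;
          spin_unlock(&inode->lock);
  }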
293 lines
9.1 KiB
C
#ifndef MY_ABC_HERE
#define MY_ABC_HERE
#endif
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#ifndef BTRFS_TRANSACTION_H
#define BTRFS_TRANSACTION_H

#include <linux/refcount.h>
#include "btrfs_inode.h"
#include "delayed-ref.h"
#include "ctree.h"
#if defined(MY_ABC_HERE)
#include "disk-io.h"
#endif /* MY_ABC_HERE */

enum btrfs_trans_state {
	TRANS_STATE_RUNNING,
	TRANS_STATE_COMMIT_START,
	TRANS_STATE_COMMIT_DOING,
	TRANS_STATE_UNBLOCKED,
	TRANS_STATE_COMPLETED,
	TRANS_STATE_MAX,
};

#define BTRFS_TRANS_HAVE_FREE_BGS	0
#define BTRFS_TRANS_DIRTY_BG_RUN	1
#define BTRFS_TRANS_CACHE_ENOSPC	2

struct btrfs_transaction {
	u64 transid;
	/*
	 * Total number of external writers (USERSPACE/START/ATTACH) in this
	 * transaction; it must be zero before the transaction can be
	 * committed.
	 */
	atomic_t num_extwriters;
	/*
	 * Total number of writers in this transaction; it must be zero
	 * before the transaction can end.
	 */
	atomic_t num_writers;
	refcount_t use_count;

	unsigned long flags;

	/* Protected by fs_info->trans_lock; hold it when changing the state. */
	enum btrfs_trans_state state;
	int aborted;
	struct list_head list;
	struct extent_io_tree dirty_pages;
	time64_t start_time;
	wait_queue_head_t writer_wait;
	wait_queue_head_t commit_wait;
	struct list_head pending_snapshots;
	struct list_head dev_update_list;
	struct list_head switch_commits;
	struct list_head dirty_bgs;

	/*
	 * There is no explicit lock which protects io_bgs, rather its
	 * consistency is implied by the fact that all the sites which modify
	 * it do so under some form of transaction critical section, namely:
	 *
	 * - btrfs_start_dirty_block_groups - This function can only ever be
	 *   run by one of the transaction committers. Refer to
	 *   BTRFS_TRANS_DIRTY_BG_RUN usage in btrfs_commit_transaction
	 *
	 * - btrfs_write_dirty_blockgroups - this is called by
	 *   commit_cowonly_roots from transaction critical section
	 *   (TRANS_STATE_COMMIT_DOING)
	 *
	 * - btrfs_cleanup_dirty_bgs - called on transaction abort
	 */
	struct list_head io_bgs;
	struct list_head dropped_roots;
	struct extent_io_tree pinned_extents;

	/*
	 * We need to make sure block group deletion doesn't race with
	 * free space cache writeout. This mutex keeps them from stomping
	 * on each other.
	 */
	struct mutex cache_write_mutex;
	spinlock_t dirty_bgs_lock;
	/* Protected by spin lock fs_info->unused_bgs_lock. */
	struct list_head deleted_bgs;
	spinlock_t dropped_roots_lock;
	struct btrfs_delayed_ref_root delayed_refs;
	struct btrfs_fs_info *fs_info;

	/*
	 * Number of ordered extents the transaction must wait for before
	 * committing. These are ordered extents started by a fast fsync.
	 */
	atomic_t pending_ordered;
	wait_queue_head_t pending_wait;

#ifdef MY_ABC_HERE
	struct list_head quota_account_list;
	spinlock_t quota_account_lock;

	/* Used for quota v1 chown. */
	struct rw_semaphore delayed_refs_rw_sem;
#endif /* MY_ABC_HERE */
};

#define __TRANS_FREEZABLE	(1U << 0)

#define __TRANS_START		(1U << 9)
#define __TRANS_ATTACH		(1U << 10)
#define __TRANS_JOIN		(1U << 11)
#define __TRANS_JOIN_NOLOCK	(1U << 12)
#define __TRANS_DUMMY		(1U << 13)
#define __TRANS_JOIN_NOSTART	(1U << 14)

#define TRANS_START		(__TRANS_START | __TRANS_FREEZABLE)
#define TRANS_ATTACH		(__TRANS_ATTACH)
#define TRANS_JOIN		(__TRANS_JOIN | __TRANS_FREEZABLE)
#define TRANS_JOIN_NOLOCK	(__TRANS_JOIN_NOLOCK)
#define TRANS_JOIN_NOSTART	(__TRANS_JOIN_NOSTART)

#define TRANS_EXTWRITERS	(__TRANS_START | __TRANS_ATTACH)

#define BTRFS_SEND_TRANS_STUB	((void *)1)
#define BTRFS_DIO_SYNC_STUB	((void *)2)

struct btrfs_trans_handle {
	u64 transid;
	u64 bytes_reserved;
	u64 chunk_bytes_reserved;
	unsigned long delayed_ref_updates;
	struct btrfs_transaction *transaction;
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *orig_rsv;
	refcount_t use_count;
	unsigned int type;
	/*
	 * Error code of transaction abort, set outside of locks and must use
	 * the READ_ONCE/WRITE_ONCE access.
	 */
	short aborted;
	bool adding_csums;
	bool allocating_chunk;
	bool can_flush_pending_bgs;
	bool reloc_reserved;
	bool dirty;
	struct btrfs_root *root;
	struct btrfs_fs_info *fs_info;
	struct list_head new_bgs;
#ifdef MY_ABC_HERE
	bool syno_usage;
#endif /* MY_ABC_HERE */
#ifdef MY_ABC_HERE
	struct btrfs_delayed_ref_throttle_ticket *syno_delayed_ref_throttle_ticket;
	unsigned long total_delayed_ref_updates;
	bool skip_throttle;
#endif /* MY_ABC_HERE */
#ifdef MY_ABC_HERE
	bool cleaner;
#endif /* MY_ABC_HERE */
};

#ifdef MY_ABC_HERE
static inline void syno_total_delayed_ref_updates_dec(struct btrfs_trans_handle *trans)
{
	if (likely(trans) && trans->total_delayed_ref_updates)
		trans->total_delayed_ref_updates--;
}
static inline void syno_total_delayed_ref_updates_inc(struct btrfs_trans_handle *trans)
{
	if (likely(trans))
		trans->total_delayed_ref_updates++;
}
#endif /* MY_ABC_HERE */

/*
 * The abort status can be changed between calls and is not protected by locks.
 * This accepts btrfs_transaction and btrfs_trans_handle as types. Once it's
 * set to a non-zero value it does not change, so the macro should be in checks
 * but is not necessary for further reads of the value.
 */
#define TRANS_ABORTED(trans)	(unlikely(READ_ONCE((trans)->aborted)))
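/*
 * Usage illustration (hypothetical call site, not part of this header):
 * because the value never changes once set, a single check suffices
 * before acting on the transaction:
 *
 *	if (TRANS_ABORTED(trans))
 *		return trans->aborted;	// propagate the recorded errno
 */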

struct btrfs_pending_snapshot {
	struct dentry *dentry;
	struct inode *dir;
	struct btrfs_root *root;
	struct btrfs_root_item *root_item;
	struct btrfs_root *snap;
	struct btrfs_qgroup_inherit *inherit;
#ifdef MY_ABC_HERE
	u64 copy_limit_from;
#endif /* MY_ABC_HERE */
	struct btrfs_path *path;
	/* block reservation for the operation */
	struct btrfs_block_rsv block_rsv;
	/* extra metadata reservation for relocation */
	int error;
	/* Preallocated anonymous block device number */
	dev_t anon_dev;
	bool readonly;
	struct list_head list;
#if defined(MY_ABC_HERE)
	/* Preallocated new_fs_root_args */
	struct btrfs_new_fs_root_args *new_fs_root_args;
#endif /* MY_ABC_HERE */
};

static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
					      struct btrfs_inode *inode)
{
	spin_lock(&inode->lock);
	inode->last_trans = trans->transaction->transid;
	inode->last_sub_trans = inode->root->log_transid;
	/*
	 * Set ->last_log_commit to one less than ->last_sub_trans instead of
	 * reading root->last_log_commit, so a racing btrfs_sync_log() can
	 * never make btrfs_inode_in_log() wrongly report the inode as already
	 * logged (see the race described in the commit message above).
	 */
	inode->last_log_commit = inode->last_sub_trans - 1;
	spin_unlock(&inode->lock);
}

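/*
 * For context, the reader this guards against is btrfs_inode_in_log()
 * (in btrfs_inode.h, not this file). A simplified paraphrase of its
 * conditions, taken from the commit message above:
 *
 *	if (inode->logged_trans == generation &&
 *	    inode->last_sub_trans <= inode->last_log_commit &&
 *	    inode->last_sub_trans <= inode->root->last_log_commit &&
 *	    list_empty(&inode->extent_tree.modified_extents))
 *		return true;	// inode already in the log, fsync can skip it
 *
 * Since btrfs_set_inode_last_trans() always leaves ->last_log_commit one
 * below ->last_sub_trans, the second condition cannot hold until a log
 * commit actually covers this sub-transaction.
 */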
/*
 * Make the qgroup code skip the given qgroupid: the old/new_roots computed
 * for the qgroup won't contain it.
 */
static inline void btrfs_set_skip_qgroup(struct btrfs_trans_handle *trans,
					 u64 qgroupid)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	WARN_ON(delayed_refs->qgroup_to_skip);
	delayed_refs->qgroup_to_skip = qgroupid;
}

static inline void btrfs_clear_skip_qgroup(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	WARN_ON(!delayed_refs->qgroup_to_skip);
	delayed_refs->qgroup_to_skip = 0;
}
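/*
 * The WARN_ONs above enforce that these two calls are paired and never
 * nested. Hypothetical usage sketch (names invented for illustration):
 *
 *	btrfs_set_skip_qgroup(trans, subvol_qgroupid);
 *	// ... queue delayed refs that must not be accounted to it ...
 *	btrfs_clear_skip_qgroup(trans);
 */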

int btrfs_end_transaction(struct btrfs_trans_handle *trans);
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   unsigned int num_items);
struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
					struct btrfs_root *root,
					unsigned int num_items);
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
					struct btrfs_root *root);
int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid);

void btrfs_add_dead_root(struct btrfs_root *root);
#if defined(MY_ABC_HERE) || defined(MY_ABC_HERE)
void btrfs_add_dead_root_head(struct btrfs_root *root);
#endif /* MY_ABC_HERE || MY_ABC_HERE */
int btrfs_defrag_root(struct btrfs_root *root);
int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root);
int btrfs_commit_transaction(struct btrfs_trans_handle *trans);
int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   int wait_for_unblock);
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans);
#ifdef MY_ABC_HERE
int btrfs_throttle_delayed_refs(struct btrfs_root *root, unsigned long total_delayed_ref_updates);
#endif /* MY_ABC_HERE */
int btrfs_should_end_transaction(struct btrfs_trans_handle *trans);
void btrfs_throttle(struct btrfs_fs_info *fs_info);
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root);
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
			       struct extent_io_tree *dirty_pages, int mark
#ifdef MY_ABC_HERE
			       , u64 *total_count, u64 *total_size
#endif /* MY_ABC_HERE */
			       );
int btrfs_wait_tree_log_extents(struct btrfs_root *root, int mark);
int btrfs_transaction_blocked(struct btrfs_fs_info *info);
int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
void btrfs_put_transaction(struct btrfs_transaction *transaction);
void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info);
void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root);
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans);

#endif