commit b3422cacdd
Before this patch, the rgrp code had a serious problem related to how it managed buffer_heads for resource groups. The problem caused file system corruption, especially in cases of journal replay.

When an rgrp glock was demoted to transfer ownership to a different cluster node, do_xmote() first calls rgrp_go_sync and then rgrp_go_inval, as expected. rgrp_go_sync called gfs2_rgrp_brelse(), which dropped the buffer_head reference count. In most cases the reference count went to zero, which is correct. However, the buffers were handled differently in other places. After rgrp_go_sync, do_xmote called rgrp_go_inval, which called gfs2_rgrp_brelse a second time; rgrp_go_inval's call to truncate_inode_pages_range would then get rid of the pages in memory, but only if the reference count dropped to 0.

Unfortunately, gfs2_rgrp_brelse was setting bi->bi_bh = NULL, so when rgrp_go_sync called gfs2_rgrp_brelse, it lost the pointer to the buffer_heads in cases where the reference count was still 1. Therefore, when rgrp_go_inval called gfs2_rgrp_brelse a second time, it failed the "if (bi->bi_bh)" check and thus failed to call brelse again. Because of that, the reference count on those buffers sometimes failed to drop from 1 to 0, which caused truncate_inode_pages_range to keep the pages in the page cache rather than freeing them.

The next time the rgrp glock was acquired, the metadata read of the rgrp buffers reused the pages in memory, which were now wrong because they had likely been modified by the other node that acquired the glock in EX (which is why we demoted the glock). This reuse of the page cache caused corruption because changes made by the other nodes were never seen, so the bitmaps were inaccurate. For some reason, the problem became most apparent when journal replay forced the replay of rgrps in memory, which caused newer rgrp data to be overwritten by the older in-core pages.

A big part of the problem was that the rgrp buffers were released in multiple places: the go_unlock function would release them when the glock was released rather than when the glock was demoted, which is clearly wrong because our intent was to cache them until the glock is demoted from SH or EX.

This patch attempts to clean up the mess and make one consistent and centralized mechanism for managing the rgrp buffer_heads by implementing several changes:

1. It eliminates the call to gfs2_rgrp_brelse() from rgrp_go_sync. We don't want to release the buffers or zero the pointers when syncing, for the reasons stated above. It only makes sense to release them when the glock is actually invalidated (go_inval), and when we do, we set the bh pointers to NULL (see the sketch below).

2. The go_unlock function (which was only used for rgrps) is eliminated, as we've talked about doing many times before. The go_unlock function was called too early in the glock dq process, and the release should not happen until the glock is invalidated.

3. It also eliminates the call to gfs2_rgrp_brelse in gfs2_clear_rgrpd. That now happens automatically when the rgrp glocks are demoted, and shouldn't happen any sooner or later than that. Instead, gfs2_clear_rgrpd has been modified to demote the rgrp glocks, and therefore free those pages, before the remaining glocks are culled by gfs2_gl_hash_clear. This prevents the gl_object from hanging around when the glocks are culled.

Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Reviewed-by: Andreas Gruenbacher <agruenba@redhat.com>
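To make the centralized mechanism concrete, here is a minimal sketch of the invalidation path after the patch. It is illustrative rather than the verbatim diff: the field and helper names used (sd_aspace, gl_vm, gfs2_glock2rgrp, GFS2_RDF_UPTODATE) are taken from the surrounding GFS2 code, and the real function may differ in detail.

/* Sketch: all rgrp buffer_head releasing now happens here, on invalidate. */
static void rgrp_go_inval(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);

	if (rgd)
		gfs2_rgrp_brelse(rgd);	/* brelse() each bi->bi_bh, then NULL it */

	WARN_ON_ONCE(!(gl->gl_flags & GLF_DEMOTE));	/* only invalidate on demote */
	truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);

	if (rgd)
		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;	/* force a re-read next acquire */
}

With rgrp_go_sync no longer dropping the references, the count reliably reaches zero here, so truncate_inode_pages_range() can actually free the pages instead of leaving stale rgrp data in the page cache for the next holder to reuse.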
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#ifndef __RGRP_DOT_H__
#define __RGRP_DOT_H__

#include <linux/slab.h>
#include <linux/uaccess.h>

/* Since each block in the file system is represented by two bits in the
 * bitmap, one 64-bit word in the bitmap will represent 32 blocks.
 * By reserving 32 blocks at a time, we can optimize / shortcut how we search
 * through the bitmaps by looking a word at a time.
 */
#define RGRP_RSRV_MINBLKS 32
#define RGRP_RSRV_ADDBLKS 64
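
/*
 * A hedged illustration (an addition, not in the upstream header): the
 * arithmetic in the comment above restated as a compile-time check.
 * Assumes static_assert from <linux/build_bug.h>; two bits per block
 * means one 64-bit bitmap word spans 64 / 2 = 32 blocks.
 */
static_assert(64 / 2 == RGRP_RSRV_MINBLKS);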

struct gfs2_rgrpd;
struct gfs2_sbd;
struct gfs2_holder;

extern void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd);

extern struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact);
extern struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp);
extern struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd);

extern void gfs2_clear_rgrpd(struct gfs2_sbd *sdp);
extern int gfs2_rindex_update(struct gfs2_sbd *sdp);
extern void gfs2_free_clones(struct gfs2_rgrpd *rgd);
extern int gfs2_rgrp_go_lock(struct gfs2_holder *gh);
extern void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd);

extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);

#define GFS2_AF_ORLOV 1
extern int gfs2_inplace_reserve(struct gfs2_inode *ip,
				struct gfs2_alloc_parms *ap);
extern void gfs2_inplace_release(struct gfs2_inode *ip);

extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
			     bool dinode, u64 *generation);

extern int gfs2_rsqa_alloc(struct gfs2_inode *ip);
extern void gfs2_rs_deltree(struct gfs2_blkreserv *rs);
extern void gfs2_rsqa_delete(struct gfs2_inode *ip, atomic_t *wcount);
extern void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
			       u64 bstart, u32 blen, int meta);
extern void gfs2_free_meta(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
			   u64 bstart, u32 blen);
extern void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip);
extern void gfs2_unlink_di(struct inode *inode);
extern int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr,
			       unsigned int type);

struct gfs2_rgrp_list {
	unsigned int rl_rgrps;
	unsigned int rl_space;
	struct gfs2_rgrpd **rl_rgd;
	struct gfs2_holder *rl_ghs;
};

extern void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
			   u64 block);
extern void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist);
extern void gfs2_rlist_free(struct gfs2_rgrp_list *rlist);
extern u64 gfs2_ri_total(struct gfs2_sbd *sdp);
extern void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_glock *gl,
			   const char *fs_id_buf);
extern int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
				   struct buffer_head *bh,
				   const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed);
extern int gfs2_fitrim(struct file *filp, void __user *argp);

/* This is how to tell if a reservation is in the rgrp tree: */
static inline bool gfs2_rs_active(const struct gfs2_blkreserv *rs)
{
	return rs && !RB_EMPTY_NODE(&rs->rs_node);
}

static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
{
	u64 first = rgd->rd_data0;
	u64 last = first + rgd->rd_data;
	return first <= block && block < last;
}

extern void check_and_update_goal(struct gfs2_inode *ip);

#endif /* __RGRP_DOT_H__ */