mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-24 00:00:52 +07:00

Merge branch 'gfs2-iopen' into for-next

commit 300e549b6e
fs/gfs2/export.c
@@ -134,7 +134,9 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb,
 	struct gfs2_sbd *sdp = sb->s_fs_info;
 	struct inode *inode;
 
-	inode = gfs2_lookup_by_inum(sdp, inum->no_addr, &inum->no_formal_ino,
+	if (!inum->no_formal_ino)
+		return ERR_PTR(-ESTALE);
+	inode = gfs2_lookup_by_inum(sdp, inum->no_addr, inum->no_formal_ino,
 				    GFS2_BLKST_DINODE);
 	if (IS_ERR(inode))
 		return ERR_CAST(inode);
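For context, a minimal sketch of the caller contract after this change: the generation number is now passed by value, 0 means "match any generation", and the NFS export path refuses handles without one. The helper name is hypothetical; gfs2_lookup_by_inum, gfs2_inum_host, and GFS2_BLKST_DINODE are from this diff and the gfs2 tree.

	/* Sketch only: the by-value no_formal_ino contract. */
	static struct inode *lookup_from_handle(struct gfs2_sbd *sdp,
						struct gfs2_inum_host *inum)
	{
		if (!inum->no_formal_ino)	/* 0 = "any generation": refused for NFS */
			return ERR_PTR(-ESTALE);
		return gfs2_lookup_by_inum(sdp, inum->no_addr, inum->no_formal_ino,
					   GFS2_BLKST_DINODE);
	}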
fs/gfs2/glock.c
@@ -125,12 +125,11 @@ static void gfs2_glock_dealloc(struct rcu_head *rcu)
 {
 	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
 
-	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
+	kfree(gl->gl_lksb.sb_lvbptr);
+	if (gl->gl_ops->go_flags & GLOF_ASPACE)
 		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
-	} else {
-		kfree(gl->gl_lksb.sb_lvbptr);
+	else
 		kmem_cache_free(gfs2_glock_cachep, gl);
-	}
 }
 
 /**
@@ -465,6 +464,15 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
 	gl->gl_tchange = jiffies;
 }
 
+static void gfs2_set_demote(struct gfs2_glock *gl)
+{
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+	set_bit(GLF_DEMOTE, &gl->gl_flags);
+	smp_mb();
+	wake_up(&sdp->sd_async_glock_wait);
+}
+
 static void gfs2_demote_wake(struct gfs2_glock *gl)
 {
 	gl->gl_demote_state = LM_ST_EXCLUSIVE;
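The set_bit/smp_mb/wake_up sequence pairs with the waiter introduced later in this series (gfs2_upgrade_iopen_glock in fs/gfs2/super.c, below). A sketch of the pairing, with the waiter condition quoted from that hunk:

	/*
	 * Waker (gfs2_set_demote, above):       Waiter (gfs2_upgrade_iopen_glock):
	 *   set_bit(GLF_DEMOTE, &gl->gl_flags);   wait_event_interruptible_timeout(
	 *   smp_mb();  / * flag visible             sdp->sd_async_glock_wait,
	 *                  before the wakeup * /    ... ||
	 *   wake_up(&sdp->sd_async_glock_wait);     test_bit(GLF_DEMOTE, ...),
	 *                                           timeout);
	 */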
@@ -757,20 +765,127 @@ __acquires(&gl->gl_lockref.lock)
 		return;
 	}
 
+void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation)
+{
+	struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr;
+
+	if (ri->ri_magic == 0)
+		ri->ri_magic = cpu_to_be32(GFS2_MAGIC);
+	if (ri->ri_magic == cpu_to_be32(GFS2_MAGIC))
+		ri->ri_generation_deleted = cpu_to_be64(generation);
+}
+
+bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation)
+{
+	struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr;
+
+	if (ri->ri_magic != cpu_to_be32(GFS2_MAGIC))
+		return false;
+	return generation <= be64_to_cpu(ri->ri_generation_deleted);
+}
+
+static void gfs2_glock_poke(struct gfs2_glock *gl)
+{
+	int flags = LM_FLAG_TRY_1CB | LM_FLAG_ANY | GL_SKIP;
+	struct gfs2_holder gh;
+	int error;
+
+	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, flags, &gh);
+	if (!error)
+		gfs2_glock_dq(&gh);
+}
+
+static bool gfs2_try_evict(struct gfs2_glock *gl)
+{
+	struct gfs2_inode *ip;
+	bool evicted = false;
+
+	/*
+	 * If there is contention on the iopen glock and we have an inode, try
+	 * to grab and release the inode so that it can be evicted.  This will
+	 * allow the remote node to go ahead and delete the inode without us
+	 * having to do it, which will avoid rgrp glock thrashing.
+	 *
+	 * The remote node is likely still holding the corresponding inode
+	 * glock, so it will run before we get to verify that the delete has
+	 * happened below.
+	 */
+	spin_lock(&gl->gl_lockref.lock);
+	ip = gl->gl_object;
+	if (ip && !igrab(&ip->i_inode))
+		ip = NULL;
+	spin_unlock(&gl->gl_lockref.lock);
+	if (ip) {
+		struct gfs2_glock *inode_gl = NULL;
+
+		gl->gl_no_formal_ino = ip->i_no_formal_ino;
+		set_bit(GIF_DEFERRED_DELETE, &ip->i_flags);
+		d_prune_aliases(&ip->i_inode);
+		iput(&ip->i_inode);
+
+		/* If the inode was evicted, gl->gl_object will now be NULL. */
+		spin_lock(&gl->gl_lockref.lock);
+		ip = gl->gl_object;
+		if (ip) {
+			inode_gl = ip->i_gl;
+			lockref_get(&inode_gl->gl_lockref);
+			clear_bit(GIF_DEFERRED_DELETE, &ip->i_flags);
+		}
+		spin_unlock(&gl->gl_lockref.lock);
+		if (inode_gl) {
+			gfs2_glock_poke(inode_gl);
+			gfs2_glock_put(inode_gl);
+		}
+		evicted = !ip;
+	}
+	return evicted;
+}
+
 static void delete_work_func(struct work_struct *work)
 {
-	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete);
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct inode *inode;
 	u64 no_addr = gl->gl_name.ln_number;
 
+	spin_lock(&gl->gl_lockref.lock);
+	clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+	spin_unlock(&gl->gl_lockref.lock);
+
 	/* If someone's using this glock to create a new dinode, the block must
 	   have been freed by another node, then re-used, in which case our
 	   iopen callback is too late after the fact. Ignore it. */
 	if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
 		goto out;
 
-	inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
+	if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
+		/*
+		 * If we can evict the inode, give the remote node trying to
+		 * delete the inode some time before verifying that the delete
+		 * has happened.  Otherwise, if we cause contention on the
+		 * inode glock immediately, the remote node will think that we
+		 * still have the inode in use, and so it will give up waiting.
+		 *
+		 * If we can't evict the inode, signal to the remote node that
+		 * the inode is still in use.  We'll later try to delete the
+		 * inode locally in gfs2_evict_inode.
+		 *
+		 * FIXME: We only need to verify that the remote node has
+		 * deleted the inode because nodes before this remote delete
+		 * rework won't cooperate.  At a later time, when we no longer
+		 * care about compatibility with such nodes, we can skip this
+		 * step entirely.
+		 */
+		if (gfs2_try_evict(gl)) {
+			if (gfs2_queue_delete_work(gl, 5 * HZ))
+				return;
+		}
+		goto out;
+	}
+
+	inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino,
+				    GFS2_BLKST_UNLINKED);
 	if (!IS_ERR_OR_NULL(inode)) {
 		d_prune_aliases(inode);
 		iput(inode);
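A sketch of the decision tree the reworked delete_work_func implements (summarized from the hunk above, not code from the commit):

	/*
	 *   GLF_INODE_CREATING set?  -> out (block was freed and reused)
	 *   GLF_DEMOTE set (remote contention on the iopen glock)?
	 *       gfs2_try_evict() succeeded -> requeue ourselves after 5 * HZ
	 *                                     to verify the remote delete
	 *                                     actually happened
	 *       otherwise                  -> out; the inode stays in use and
	 *                                     gets deleted locally later, in
	 *                                     gfs2_evict_inode
	 *   neither -> look the inode up (now keyed by gl_no_formal_ino) and
	 *              prune it so the local delete can proceed
	 */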
@@ -801,7 +916,7 @@ static void glock_work_func(struct work_struct *work)
 
 		if (!delay) {
 			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
-			set_bit(GLF_DEMOTE, &gl->gl_flags);
+			gfs2_set_demote(gl);
 		}
 	}
 	run_queue(gl, 0);
@@ -932,7 +1047,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	gl->gl_object = NULL;
 	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
 	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
-	INIT_WORK(&gl->gl_delete, delete_work_func);
+	INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func);
 
 	mapping = gfs2_glock2aspace(gl);
 	if (mapping) {
@@ -1146,9 +1261,10 @@ int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs)
 static void handle_callback(struct gfs2_glock *gl, unsigned int state,
 			    unsigned long delay, bool remote)
 {
-	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
-
-	set_bit(bit, &gl->gl_flags);
+	if (delay)
+		set_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
+	else
+		gfs2_set_demote(gl);
 	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
 		gl->gl_demote_state = state;
 		gl->gl_demote_time = jiffies;
@@ -1755,6 +1871,44 @@ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
 	rhashtable_walk_exit(&iter);
 }
 
+bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay)
+{
+	bool queued;
+
+	spin_lock(&gl->gl_lockref.lock);
+	queued = queue_delayed_work(gfs2_delete_workqueue,
+				    &gl->gl_delete, delay);
+	if (queued)
+		set_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+	spin_unlock(&gl->gl_lockref.lock);
+	return queued;
+}
+
+void gfs2_cancel_delete_work(struct gfs2_glock *gl)
+{
+	if (cancel_delayed_work_sync(&gl->gl_delete)) {
+		clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+		gfs2_glock_put(gl);
+	}
+}
+
+bool gfs2_delete_work_queued(const struct gfs2_glock *gl)
+{
+	return test_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+}
+
+static void flush_delete_work(struct gfs2_glock *gl)
+{
+	flush_delayed_work(&gl->gl_delete);
+	gfs2_glock_queue_work(gl, 0);
+}
+
+void gfs2_flush_delete_work(struct gfs2_sbd *sdp)
+{
+	glock_hash_walk(flush_delete_work, sdp);
+	flush_workqueue(gfs2_delete_workqueue);
+}
+
 /**
  * thaw_glock - thaw out a glock which has an unprocessed reply waiting
  * @gl: The glock to thaw
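How the new API fits together, as a hypothetical helper for illustration only; the calls are the functions added above, and the reference-counting convention is the one used by the callers converted later in this diff (try_rgrp_unlink, gfs2_drop_inode):

	/* Hypothetical usage sketch, not part of the commit. */
	static void example_delete_work_lifecycle(struct gfs2_sbd *sdp,
						  struct gfs2_glock *gl)
	{
		/* Schedule: sets GLF_PENDING_DELETE under gl_lockref.lock.
		 * Callers take a glock reference first and drop it again if
		 * the work was already queued. */
		gfs2_glock_hold(gl);
		if (!gfs2_queue_delete_work(gl, 5 * HZ))
			gfs2_glock_put(gl);

		/* A lookup that revives the inode backs the work out again: */
		gfs2_cancel_delete_work(gl);

		/* On remount read-only, kick pending timers and drain: */
		gfs2_flush_delete_work(sdp);
	}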
fs/gfs2/glock.h
@@ -244,6 +244,10 @@ static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
 
 extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
 extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
+extern bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay);
+extern void gfs2_cancel_delete_work(struct gfs2_glock *gl);
+extern bool gfs2_delete_work_queued(const struct gfs2_glock *gl);
+extern void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
 extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
 extern void gfs2_glock_finish_truncate(struct gfs2_inode *ip);
 extern void gfs2_glock_thaw(struct gfs2_sbd *sdp);
@@ -315,4 +319,7 @@ static inline void glock_clear_object(struct gfs2_glock *gl, void *object)
 	spin_unlock(&gl->gl_lockref.lock);
 }
 
+extern void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation);
+extern bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation);
+
 #endif /* __GLOCK_DOT_H__ */
fs/gfs2/glops.c
@@ -612,11 +612,17 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
 	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
 	    gl->gl_state == LM_ST_SHARED && ip) {
 		gl->gl_lockref.count++;
-		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
+		if (!queue_delayed_work(gfs2_delete_workqueue,
+					&gl->gl_delete, 0))
 			gl->gl_lockref.count--;
 	}
 }
 
+static int iopen_go_demote_ok(const struct gfs2_glock *gl)
+{
+	return !gfs2_delete_work_queued(gl);
+}
+
 /**
  * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
  * @gl: glock being freed
@@ -696,7 +702,7 @@ const struct gfs2_glock_operations gfs2_inode_glops = {
 	.go_lock = inode_go_lock,
 	.go_dump = inode_go_dump,
 	.go_type = LM_TYPE_INODE,
-	.go_flags = GLOF_ASPACE | GLOF_LRU,
+	.go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
 	.go_free = inode_go_free,
 };
@@ -720,6 +726,7 @@ const struct gfs2_glock_operations gfs2_freeze_glops = {
 const struct gfs2_glock_operations gfs2_iopen_glops = {
 	.go_type = LM_TYPE_IOPEN,
 	.go_callback = iopen_go_callback,
+	.go_demote_ok = iopen_go_demote_ok,
 	.go_flags = GLOF_LRU | GLOF_NONDISK,
 };
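The new go_demote_ok callback keeps an iopen glock cached while delete work is queued, so the pending work isn't undercut by LRU reclaim. For reference, a sketch of where it is consulted, simplified from demote_ok() in fs/gfs2/glock.c (not part of this diff):

	static int demote_ok(const struct gfs2_glock *gl)
	{
		const struct gfs2_glock_operations *glops = gl->gl_ops;

		if (gl->gl_state == LM_ST_UNLOCKED)
			return 0;
		if (!list_empty(&gl->gl_holders))
			return 0;
		if (glops->go_demote_ok)	/* iopen: !gfs2_delete_work_queued(gl) */
			return glops->go_demote_ok(gl);
		return 1;
	}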
fs/gfs2/incore.h
@@ -345,6 +345,7 @@ enum {
 	GLF_OBJECT			= 14, /* Used only for tracing */
 	GLF_BLOCKING			= 15,
 	GLF_INODE_CREATING		= 16, /* Inode creation occurring */
+	GLF_PENDING_DELETE		= 17,
 	GLF_FREEING			= 18, /* Wait for glock to be freed */
 };
@@ -378,8 +379,11 @@ struct gfs2_glock {
 	atomic_t gl_revokes;
 	struct delayed_work gl_work;
 	union {
-		/* For inode and iopen glocks only */
-		struct work_struct gl_delete;
+		/* For iopen glocks only */
+		struct {
+			struct delayed_work gl_delete;
+			u64 gl_no_formal_ino;
+		};
 		/* For rgrp glocks only */
 		struct {
 			loff_t start;
@@ -398,6 +402,7 @@ enum {
 	GIF_ORDERED		= 4,
 	GIF_FREE_VFS_INODE	= 5,
 	GIF_GLOP_PENDING	= 6,
+	GIF_DEFERRED_DELETE	= 7,
 };
 
 struct gfs2_inode {
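A note on the union (sketch of the invariant, not code from the commit): the anonymous struct overlays the rgrp-only fields, so gl_delete and gl_no_formal_ino may only be touched on iopen glocks. The series follows that rule:

	/*
	 *   gl->gl_no_formal_ino = ip->i_no_formal_ino;      / * gfs2_try_evict() * /
	 *   gfs2_lookup_by_inum(sdp, no_addr,
	 *                       gl->gl_no_formal_ino, ...);  / * delete_work_func() * /
	 */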
fs/gfs2/inode.c
@@ -114,6 +114,10 @@ static void gfs2_set_iop(struct inode *inode)
  * placeholder because it doesn't otherwise make sense), the on-disk block type
  * is verified to be @blktype.
  *
+ * When @no_formal_ino is non-zero, this function will return ERR_PTR(-ESTALE)
+ * if it detects that @no_formal_ino doesn't match the actual inode generation
+ * number.  However, it doesn't always know unless @type is DT_UNKNOWN.
+ *
  * Returns: A VFS inode, or an error
  */
@@ -157,6 +161,11 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 		if (error)
 			goto fail;
 
+		error = -ESTALE;
+		if (no_formal_ino &&
+		    gfs2_inode_already_deleted(ip->i_gl, no_formal_ino))
+			goto fail;
+
 		if (blktype != GFS2_BLKST_FREE) {
 			error = gfs2_check_blk_type(sdp, no_addr,
 						    blktype);
@@ -170,6 +179,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 		error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
 		if (unlikely(error))
 			goto fail;
+		gfs2_cancel_delete_work(ip->i_iopen_gh.gh_gl);
 		glock_set_object(ip->i_iopen_gh.gh_gl, ip);
 		gfs2_glock_put(io_gl);
 		io_gl = NULL;
@@ -188,13 +198,23 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 			inode->i_mode = DT2IF(type);
 		}
 
-		gfs2_set_iop(inode);
+		if (gfs2_holder_initialized(&i_gh))
+			gfs2_glock_dq_uninit(&i_gh);
 
-		unlock_new_inode(inode);
+		gfs2_set_iop(inode);
 	}
 
-	if (gfs2_holder_initialized(&i_gh))
-		gfs2_glock_dq_uninit(&i_gh);
+	if (no_formal_ino && ip->i_no_formal_ino &&
+	    no_formal_ino != ip->i_no_formal_ino) {
+		if (inode->i_state & I_NEW)
+			goto fail;
+		iput(inode);
+		return ERR_PTR(-ESTALE);
+	}
+
+	if (inode->i_state & I_NEW)
+		unlock_new_inode(inode);
+
 	return inode;
 
 fail:
@@ -206,23 +226,26 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 	return ERR_PTR(error);
 }
 
 /**
  * gfs2_lookup_by_inum - look up an inode by inode number
  * @sdp: The super block
  * @no_addr: The inode number
+ * @no_formal_ino: The inode generation number (0 for any)
  * @blktype: Requested block type (see gfs2_inode_lookup)
  */
 struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
-				  u64 *no_formal_ino, unsigned int blktype)
+				  u64 no_formal_ino, unsigned int blktype)
 {
 	struct super_block *sb = sdp->sd_vfs;
 	struct inode *inode;
 	int error;
 
-	inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0, blktype);
+	inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, no_formal_ino,
+				  blktype);
 	if (IS_ERR(inode))
 		return inode;
 
-	/* Two extra checks for NFS only */
 	if (no_formal_ino) {
-		error = -ESTALE;
-		if (GFS2_I(inode)->i_no_formal_ino != *no_formal_ino)
-			goto fail_iput;
-
 		error = -EIO;
 		if (GFS2_I(inode)->i_diskflags & GFS2_DIF_SYSTEM)
 			goto fail_iput;
@@ -724,6 +747,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
 	if (error)
 		goto fail_gunlock2;
 
+	gfs2_cancel_delete_work(ip->i_iopen_gh.gh_gl);
 	glock_set_object(ip->i_iopen_gh.gh_gl, ip);
 	gfs2_set_iop(inode);
 	insert_inode_hash(inode);
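The race the two gfs2_cancel_delete_work() calls above guard against, as an illustrative timeline (summarized from this series, not code from the commit):

	/*
	 *   remote node                    this node
	 *   -----------                    ---------
	 *   unlinks the inode, which
	 *   triggers our iopen callback -> delete work queued (delayed)
	 *                                  gfs2_inode_lookup() or
	 *                                  gfs2_create_inode() reuses the
	 *                                  block: cancel the pending work so
	 *                                  the revived inode isn't pruned
	 *                                  out from under its new user
	 */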
fs/gfs2/inode.h
@@ -92,7 +92,7 @@ extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
 					u64 no_addr, u64 no_formal_ino,
 					unsigned int blktype);
 extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
-					 u64 *no_formal_ino,
+					 u64 no_formal_ino,
 					 unsigned int blktype);
 
 extern int gfs2_inode_refresh(struct gfs2_inode *ip);
fs/gfs2/rgrp.c
@@ -1835,7 +1835,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
 	 */
 	ip = gl->gl_object;
 
-	if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
+	if (ip || !gfs2_queue_delete_work(gl, 0))
 		gfs2_glock_put(gl);
 	else
 		found++;
fs/gfs2/super.c
@@ -626,7 +626,7 @@ int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
 		}
 	}
 
-	flush_workqueue(gfs2_delete_workqueue);
+	gfs2_flush_delete_work(sdp);
 	if (!log_write_allowed && current == sdp->sd_quotad_process)
 		fs_warn(sdp, "The quotad daemon is withdrawing.\n");
 	else if (sdp->sd_quotad_process)
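Why a plain flush_workqueue() is no longer enough, now that gl_delete is delayed work (sketch of the reasoning):

	/*
	 * A delayed work item whose timer hasn't expired is not yet on the
	 * workqueue, so flush_workqueue() would miss it.  The replacement,
	 * gfs2_flush_delete_work() (added in the glock.c hunk above), first
	 * calls flush_delayed_work() on every glock, which kicks a pending
	 * timer and waits for the work to finish, and only then drains
	 * gfs2_delete_workqueue.
	 */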
@@ -1054,7 +1054,7 @@ static int gfs2_drop_inode(struct inode *inode)
 		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
 
 		gfs2_glock_hold(gl);
-		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
+		if (!gfs2_queue_delete_work(gl, 0))
 			gfs2_glock_queue_put(gl);
 		return false;
 	}
@@ -1258,6 +1258,55 @@ static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
 	gfs2_glock_put(gl);
 }
 
+static bool gfs2_upgrade_iopen_glock(struct inode *inode)
+{
+	struct gfs2_inode *ip = GFS2_I(inode);
+	struct gfs2_sbd *sdp = GFS2_SB(inode);
+	struct gfs2_holder *gh = &ip->i_iopen_gh;
+	long timeout = 5 * HZ;
+	int error;
+
+	gh->gh_flags |= GL_NOCACHE;
+	gfs2_glock_dq_wait(gh);
+
+	/*
+	 * If there are no other lock holders, we'll get the lock immediately.
+	 * Otherwise, the other nodes holding the lock will be notified about
+	 * our locking request.  If they don't have the inode open, they'll
+	 * evict the cached inode and release the lock.  Otherwise, if they
+	 * poke the inode glock, we'll take this as an indication that they
+	 * still need the iopen glock and that they'll take care of deleting
+	 * the inode when they're done.  As a last resort, if another node
+	 * keeps holding the iopen glock without showing any activity on the
+	 * inode glock, we'll eventually time out.
+	 *
+	 * Note that we're passing the LM_FLAG_TRY_1CB flag to the first
+	 * locking request as an optimization to notify lock holders as soon as
+	 * possible.  Without that flag, they'd be notified implicitly by the
+	 * second locking request.
+	 */
+
+	gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, gh);
+	error = gfs2_glock_nq(gh);
+	if (error != GLR_TRYFAILED)
+		return !error;
+
+	gfs2_holder_reinit(LM_ST_EXCLUSIVE, GL_ASYNC | GL_NOCACHE, gh);
+	error = gfs2_glock_nq(gh);
+	if (error)
+		return false;
+
+	timeout = wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
+		!test_bit(HIF_WAIT, &gh->gh_iflags) ||
+		test_bit(GLF_DEMOTE, &ip->i_gl->gl_flags),
+		timeout);
+	if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) {
+		gfs2_glock_dq(gh);
+		return false;
+	}
+	return true;
+}
+
 /**
  * gfs2_evict_inode - Remove an inode from cache
  * @inode: The inode to evict
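An illustrative two-node timeline for the protocol described in the comment above (names from this series; the exact interleaving is one possible schedule, not a guarantee):

	/*
	 *   deleting node                      node still holding iopen SH
	 *   -------------                      ---------------------------
	 *   gfs2_upgrade_iopen_glock():
	 *     nq EX, LM_FLAG_TRY_1CB   ----->  iopen callback fires;
	 *                                      gfs2_try_evict():
	 *                                        inode unused -> evicted,
	 *                                        iopen lock released
	 *     retry nq EX, GL_ASYNC    ----->  ...or inode still open:
	 *                                        gfs2_glock_poke(inode_gl)
	 *     sees GLF_DEMOTE on our   <-----
	 *     inode glock -> give up; the
	 *     holder deletes when done
	 *   no response within 5 * HZ -> time out, fall back to out_truncate
	 */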
@@ -1299,9 +1348,12 @@ static void gfs2_evict_inode(struct inode *inode)
 	if (test_bit(GIF_ALLOC_FAILED, &ip->i_flags)) {
 		BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));
 		gfs2_holder_mark_uninitialized(&gh);
-		goto alloc_failed;
+		goto out_delete;
 	}
 
+	if (test_bit(GIF_DEFERRED_DELETE, &ip->i_flags))
+		goto out;
+
 	/* Deletes should never happen under memory pressure anymore. */
 	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
 		goto out;
@@ -1315,6 +1367,8 @@ static void gfs2_evict_inode(struct inode *inode)
 		goto out;
 	}
 
+	if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino))
+		goto out_truncate;
 	error = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
 	if (error)
 		goto out_truncate;
@@ -1331,16 +1385,13 @@ static void gfs2_evict_inode(struct inode *inode)
 	if (inode->i_nlink)
 		goto out_truncate;
 
-alloc_failed:
+out_delete:
 	if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
 	    test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
-		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
-		gfs2_glock_dq_wait(&ip->i_iopen_gh);
-		gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE,
-				   &ip->i_iopen_gh);
-		error = gfs2_glock_nq(&ip->i_iopen_gh);
-		if (error)
+		if (!gfs2_upgrade_iopen_glock(inode)) {
+			gfs2_holder_uninit(&ip->i_iopen_gh);
 			goto out_truncate;
+		}
 	}
 
 	if (S_ISDIR(inode->i_mode) &&
@@ -1368,6 +1419,7 @@ static void gfs2_evict_inode(struct inode *inode)
 	   that subsequent inode creates don't see an old gl_object. */
 	glock_clear_object(ip->i_gl, ip);
 	error = gfs2_dinode_dealloc(ip);
+	gfs2_inode_remember_delete(ip->i_gl, ip->i_no_formal_ino);
 	goto out_unlock;
 
 out_truncate:
include/uapi/linux/gfs2_ondisk.h
@@ -171,6 +171,12 @@ struct gfs2_rindex {
 #define GFS2_RGF_NOALLOC	0x00000008
 #define GFS2_RGF_TRIMMED	0x00000010
 
+struct gfs2_inode_lvb {
+	__be32 ri_magic;
+	__be32 __pad;
+	__be64 ri_generation_deleted;
+};
+
 struct gfs2_rgrp_lvb {
 	__be32 rl_magic;
 	__be32 rl_flags;
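A sketch of how this 16-byte lock value block is consumed, mirroring gfs2_inode_already_deleted() from the glock.c hunk above (the helper name is hypothetical). The `<=` works because the generation number of a reused dinode block only ever grows, so recording generation N implies every generation at or below N is already gone:

	/* Hypothetical reader, illustration only. */
	static bool peer_deleted_generation(const char *lvbptr, u64 generation)
	{
		const struct gfs2_inode_lvb *ri = (const void *)lvbptr;

		if (ri->ri_magic != cpu_to_be32(GFS2_MAGIC))
			return false;	/* LVB written by an old node, or never */
		return generation <= be64_to_cpu(ri->ri_generation_deleted);
	}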