gfs2: Turn gl_delete into a delayed work
This requires flushing delayed work items in gfs2_make_fs_ro (which is called before unmounting a filesystem).

When inodes are deleted and then recreated, pending gl_delete work items would have no effect because the inode generations will have changed, so we can cancel any pending gl_delete works before reusing iopen glocks.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
parent f286d627ef
commit a0e3cc65fa
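
As a reading aid before the per-file hunks: a minimal sketch of the work_struct to delayed_work conversion pattern the commit applies to gl_delete. The demo_* names are hypothetical; only the workqueue API calls (INIT_DELAYED_WORK, queue_delayed_work, to_delayed_work) are real.

#include <linux/workqueue.h>

/* Hypothetical object; only the workqueue API calls below are real. */
struct demo_obj {
        struct delayed_work del_work;   /* was: struct work_struct */
};

static struct workqueue_struct *demo_wq;        /* assumed allocated elsewhere */

static void demo_work_func(struct work_struct *work)
{
        /* Handlers still receive a work_struct; recover the embedding
         * delayed_work first, then the outer object. */
        struct delayed_work *dwork = to_delayed_work(work);
        struct demo_obj *obj = container_of(dwork, struct demo_obj, del_work);

        (void)obj;      /* ... the deferred deletion would go here ... */
}

static void demo_init_and_queue(struct demo_obj *obj)
{
        INIT_DELAYED_WORK(&obj->del_work, demo_work_func);      /* was: INIT_WORK */
        /* A delay of 0 behaves like queue_work(); a nonzero delay arms
         * the embedded timer first. */
        queue_delayed_work(demo_wq, &obj->del_work, 0);         /* was: queue_work */
}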
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -776,11 +776,16 @@ bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation)
 
 static void delete_work_func(struct work_struct *work)
 {
-        struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
+        struct delayed_work *dwork = to_delayed_work(work);
+        struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete);
         struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
         struct inode *inode;
         u64 no_addr = gl->gl_name.ln_number;
 
+        spin_lock(&gl->gl_lockref.lock);
+        clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+        spin_unlock(&gl->gl_lockref.lock);
+
         /* If someone's using this glock to create a new dinode, the block must
            have been freed by another node, then re-used, in which case our
            iopen callback is too late after the fact. Ignore it. */
@@ -949,7 +954,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
         gl->gl_object = NULL;
         gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
         INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
-        INIT_WORK(&gl->gl_delete, delete_work_func);
+        INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func);
 
         mapping = gfs2_glock2aspace(gl);
         if (mapping) {
@@ -1772,6 +1777,44 @@ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
         rhashtable_walk_exit(&iter);
 }
 
+bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay)
+{
+        bool queued;
+
+        spin_lock(&gl->gl_lockref.lock);
+        queued = queue_delayed_work(gfs2_delete_workqueue,
+                                    &gl->gl_delete, delay);
+        if (queued)
+                set_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+        spin_unlock(&gl->gl_lockref.lock);
+        return queued;
+}
+
+void gfs2_cancel_delete_work(struct gfs2_glock *gl)
+{
+        if (cancel_delayed_work_sync(&gl->gl_delete)) {
+                clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+                gfs2_glock_put(gl);
+        }
+}
+
+bool gfs2_delete_work_queued(const struct gfs2_glock *gl)
+{
+        return test_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+}
+
+static void flush_delete_work(struct gfs2_glock *gl)
+{
+        flush_delayed_work(&gl->gl_delete);
+        gfs2_glock_queue_work(gl, 0);
+}
+
+void gfs2_flush_delete_work(struct gfs2_sbd *sdp)
+{
+        glock_hash_walk(flush_delete_work, sdp);
+        flush_workqueue(gfs2_delete_workqueue);
+}
+
 /**
  * thaw_glock - thaw out a glock which has an unprocessed reply waiting
  * @gl: The glock to thaw
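
All of the updated call sites (iopen_go_callback, try_rgrp_unlink, gfs2_drop_inode, further down) follow the same reference discipline around the new queueing helper. A hedged caller-side sketch, where demo_obj_get/demo_obj_put and demo_queue_delete are hypothetical stand-ins for the glock reference operations and gfs2_queue_delete_work:

struct demo_obj;
void demo_obj_get(struct demo_obj *obj);        /* hypothetical: take a reference */
void demo_obj_put(struct demo_obj *obj);        /* hypothetical: drop a reference */
bool demo_queue_delete(struct demo_obj *obj, unsigned long delay);

static void demo_request_delete(struct demo_obj *obj)
{
        /* The queued work item owns one reference; take it up front. */
        demo_obj_get(obj);
        /*
         * queue_delayed_work() (inside the helper) returns false when a
         * delete is already queued. The existing item already holds its
         * own reference, so ours goes back, exactly as the updated
         * callers do with gl_lockref.count-- or gfs2_glock_queue_put().
         */
        if (!demo_queue_delete(obj, 0))
                demo_obj_put(obj);
}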
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -235,6 +235,10 @@ static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
 
 extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
 extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
+extern bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay);
+extern void gfs2_cancel_delete_work(struct gfs2_glock *gl);
+extern bool gfs2_delete_work_queued(const struct gfs2_glock *gl);
+extern void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
 extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
 extern void gfs2_glock_finish_truncate(struct gfs2_inode *ip);
 extern void gfs2_glock_thaw(struct gfs2_sbd *sdp);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -608,11 +608,17 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
         if (gl->gl_demote_state == LM_ST_UNLOCKED &&
             gl->gl_state == LM_ST_SHARED && ip) {
                 gl->gl_lockref.count++;
-                if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
+                if (!queue_delayed_work(gfs2_delete_workqueue,
+                                        &gl->gl_delete, 0))
                         gl->gl_lockref.count--;
         }
 }
 
+static int iopen_go_demote_ok(const struct gfs2_glock *gl)
+{
+        return !gfs2_delete_work_queued(gl);
+}
+
 /**
  * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
  * @gl: glock being freed
@@ -716,6 +722,7 @@ const struct gfs2_glock_operations gfs2_freeze_glops = {
 const struct gfs2_glock_operations gfs2_iopen_glops = {
         .go_type = LM_TYPE_IOPEN,
         .go_callback = iopen_go_callback,
+        .go_demote_ok = iopen_go_demote_ok,
         .go_flags = GLOF_LRU | GLOF_NONDISK,
 };
 
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -345,6 +345,7 @@ enum {
         GLF_OBJECT = 14, /* Used only for tracing */
         GLF_BLOCKING = 15,
         GLF_INODE_CREATING = 16, /* Inode creation occurring */
+        GLF_PENDING_DELETE = 17,
         GLF_FREEING = 18, /* Wait for glock to be freed */
 };
 
@@ -378,8 +379,8 @@ struct gfs2_glock {
         atomic_t gl_revokes;
         struct delayed_work gl_work;
         union {
-                /* For inode and iopen glocks only */
-                struct work_struct gl_delete;
+                /* For iopen glocks only */
+                struct delayed_work gl_delete;
                 /* For rgrp glocks only */
                 struct {
                         loff_t start;
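
The union works because a glock's type is fixed at creation, so the iopen-only gl_delete member and the rgrp-only fields never coexist on one object. A hypothetical miniature of the same layout choice:

#include <linux/types.h>
#include <linux/workqueue.h>

/* Hypothetical miniature; demo_glock is not a real gfs2 type. */
struct demo_glock {
        int type;       /* set once at creation: iopen, rgrp, ... */
        union {
                /* iopen glocks only */
                struct delayed_work del_work;
                /* rgrp glocks only */
                struct {
                        loff_t start;
                        loff_t end;
                } range;
        };
};

A union is sized by its largest member, so every glock type pays for the growth from work_struct to delayed_work (which embeds a timer_list); in exchange, iopen deletes gain a per-object timer.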
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -170,6 +170,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
                 error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
                 if (unlikely(error))
                         goto fail;
+                gfs2_cancel_delete_work(ip->i_iopen_gh.gh_gl);
                 glock_set_object(ip->i_iopen_gh.gh_gl, ip);
                 gfs2_glock_put(io_gl);
                 io_gl = NULL;
@@ -724,6 +725,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
         if (error)
                 goto fail_gunlock2;
 
+        gfs2_cancel_delete_work(ip->i_iopen_gh.gh_gl);
         glock_set_object(ip->i_iopen_gh.gh_gl, ip);
         gfs2_set_iop(inode);
         insert_inode_hash(inode);
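
Both hunks above insert the cancel before the iopen glock is bound to a (re)created inode, since any still-queued delete targets the old inode generation. The conditional gfs2_glock_put inside gfs2_cancel_delete_work relies on the return-value contract of cancel_delayed_work_sync, sketched here with hypothetical demo_* names:

#include <linux/workqueue.h>

struct demo_obj {
        struct delayed_work del_work;
        unsigned long flags;
};

#define DEMO_PENDING_DELETE 0

void demo_obj_put(struct demo_obj *obj);        /* hypothetical ref drop */

static void demo_cancel_delete(struct demo_obj *obj)
{
        /*
         * cancel_delayed_work_sync() returns true only if the item was
         * still pending (timer armed or queued, handler not yet run);
         * the handler will then never run, so the reference taken at
         * queue time must be dropped here. If the handler is already
         * running, the call waits for it and returns false, and the
         * handler is assumed to drop the reference itself.
         */
        if (cancel_delayed_work_sync(&obj->del_work)) {
                clear_bit(DEMO_PENDING_DELETE, &obj->flags);
                demo_obj_put(obj);
        }
}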
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1835,7 +1835,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
          */
         ip = gl->gl_object;
 
-        if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
+        if (ip || !gfs2_queue_delete_work(gl, 0))
                 gfs2_glock_put(gl);
         else
                 found++;
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -626,7 +626,7 @@ int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
                 }
         }
 
-        flush_workqueue(gfs2_delete_workqueue);
+        gfs2_flush_delete_work(sdp);
         if (!log_write_allowed && current == sdp->sd_quotad_process)
                 fs_warn(sdp, "The quotad daemon is withdrawing.\n");
         else if (sdp->sd_quotad_process)
@@ -1054,7 +1054,7 @@ static int gfs2_drop_inode(struct inode *inode)
                 struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
 
                 gfs2_glock_hold(gl);
-                if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
+                if (!gfs2_queue_delete_work(gl, 0))
                         gfs2_glock_queue_put(gl);
                 return false;
         }
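
The gfs2_make_fs_ro hunk is the flushing the commit message refers to: a delayed item could otherwise still be sitting on its timer when the filesystem goes read-only. A hedged sketch of the two-stage drain that gfs2_flush_delete_work (added in glock.c above) performs, with hypothetical demo_* names and without the glock state-machine kick:

#include <linux/list.h>
#include <linux/workqueue.h>

struct demo_obj {
        struct list_head list;
        struct delayed_work del_work;
};

/* Hypothetical shutdown-time flush, mirroring the shape of
 * gfs2_flush_delete_work(): per-object flush first, then drain the
 * whole queue. */
static void demo_flush_all_deletes(struct list_head *objs,
                                   struct workqueue_struct *demo_wq)
{
        struct demo_obj *obj;

        list_for_each_entry(obj, objs, list)
                /* flush_delayed_work() expires a still-armed timer
                 * immediately and waits for this object's handler. */
                flush_delayed_work(&obj->del_work);

        /* Catch any work the handlers above queued in turn. */
        flush_workqueue(demo_wq);
}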