diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 2716d56ed0a0..b5d04f3a247e 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -144,7 +144,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 
 		error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
 		if (unlikely(error))
-			goto fail_put;
+			goto fail;
 
 		if (type == DT_UNKNOWN || blktype != GFS2_BLKST_FREE) {
 			/*
@@ -155,13 +155,13 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 			error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE,
 						   GL_SKIP, &i_gh);
 			if (error)
-				goto fail_put;
+				goto fail;
 
 			if (blktype != GFS2_BLKST_FREE) {
 				error = gfs2_check_blk_type(sdp, no_addr,
 							    blktype);
 				if (error)
-					goto fail_put;
+					goto fail;
 			}
 		}
 
@@ -169,7 +169,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 		set_bit(GIF_INVALID, &ip->i_flags);
 		error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
 		if (unlikely(error))
-			goto fail_put;
+			goto fail;
 		glock_set_object(ip->i_iopen_gh.gh_gl, ip);
 		gfs2_glock_put(io_gl);
 		io_gl = NULL;
@@ -182,7 +182,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 			/* Inode glock must be locked already */
 			error = gfs2_inode_refresh(GFS2_I(inode));
 			if (error)
-				goto fail_refresh;
+				goto fail;
 		} else {
 			ip->i_no_formal_ino = no_formal_ino;
 			inode->i_mode = DT2IF(type);
@@ -197,17 +197,11 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 		gfs2_glock_dq_uninit(&i_gh);
 	return inode;
 
-fail_refresh:
-	ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
-	glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
-	gfs2_glock_dq_uninit(&ip->i_iopen_gh);
-fail_put:
+fail:
 	if (io_gl)
 		gfs2_glock_put(io_gl);
-	glock_clear_object(ip->i_gl, ip);
 	if (gfs2_holder_initialized(&i_gh))
 		gfs2_glock_dq_uninit(&i_gh);
-fail:
 	iget_failed(inode);
 	return ERR_PTR(error);
 }
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 693c6d13473c..d664b0175946 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1393,14 +1393,6 @@ static void gfs2_evict_inode(struct inode *inode)
 	if (gfs2_rs_active(&ip->i_res))
 		gfs2_rs_deltree(&ip->i_res);
 
-	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
-		glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
-		if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
-			ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
-			gfs2_glock_dq(&ip->i_iopen_gh);
-		}
-		gfs2_holder_uninit(&ip->i_iopen_gh);
-	}
 	if (gfs2_holder_initialized(&gh)) {
 		glock_clear_object(ip->i_gl, ip);
 		gfs2_glock_dq_uninit(&gh);
@@ -1413,18 +1405,23 @@ static void gfs2_evict_inode(struct inode *inode)
 	gfs2_ordered_del_inode(ip);
 	clear_inode(inode);
 	gfs2_dir_hash_inval(ip);
-	glock_clear_object(ip->i_gl, ip);
-	wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
-	gfs2_glock_add_to_lru(ip->i_gl);
-	gfs2_glock_put_eventually(ip->i_gl);
-	ip->i_gl = NULL;
+	if (ip->i_gl) {
+		glock_clear_object(ip->i_gl, ip);
+		wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
+		gfs2_glock_add_to_lru(ip->i_gl);
+		gfs2_glock_put_eventually(ip->i_gl);
+		ip->i_gl = NULL;
+	}
 	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
 		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
 
 		glock_clear_object(gl, ip);
-		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
+		if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
+			ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
+			gfs2_glock_dq(&ip->i_iopen_gh);
+		}
 		gfs2_glock_hold(gl);
-		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
+		gfs2_holder_uninit(&ip->i_iopen_gh);
 		gfs2_glock_put_eventually(gl);
 	}
 }
@@ -1438,6 +1435,7 @@ static struct inode *gfs2_alloc_inode(struct super_block *sb)
 		return NULL;
 	ip->i_flags = 0;
 	ip->i_gl = NULL;
+	gfs2_holder_mark_uninitialized(&ip->i_iopen_gh);
 	memset(&ip->i_res, 0, sizeof(ip->i_res));
 	RB_CLEAR_NODE(&ip->i_res.rs_node);
 	ip->i_rahead = 0;
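
Note on the mechanism: the single fail: label in gfs2_inode_lookup() and the
reordered teardown in gfs2_evict_inode() both rely on every pointer they touch
being in a known state. gfs2_alloc_inode() now marks i_iopen_gh as
uninitialized up front, and ip->i_gl is only torn down when it is non-NULL, so
the cleanup paths can guard on gfs2_holder_initialized() (or on the pointer
itself) regardless of how far initialization got. For reference, the two
holder helpers look roughly like the following; this is paraphrased from
fs/gfs2/glock.h and is not part of this patch:

	/* A holder counts as initialized iff it points at a glock, so
	 * clearing gh_gl is all that is needed to make the
	 * gfs2_holder_initialized() guards in the paths above safe. */
	static inline void gfs2_holder_mark_uninitialized(struct gfs2_holder *gh)
	{
		gh->gh_gl = NULL;
	}

	static inline bool gfs2_holder_initialized(struct gfs2_holder *gh)
	{
		return gh->gh_gl;
	}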