diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 84544a4f012d..a683d9b27d76 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -1566,7 +1566,7 @@ static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh, continue; } if (bstart) { - __gfs2_free_blocks(ip, bstart, (u32)blen, meta); + __gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta); (*btotal) += blen; gfs2_add_inode_blocks(&ip->i_inode, -blen); } @@ -1574,7 +1574,7 @@ static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh, blen = 1; } if (bstart) { - __gfs2_free_blocks(ip, bstart, (u32)blen, meta); + __gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta); (*btotal) += blen; gfs2_add_inode_blocks(&ip->i_inode, -blen); } diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c index e37002560c11..daa14ab4e31b 100644 --- a/fs/gfs2/dir.c +++ b/fs/gfs2/dir.c @@ -506,7 +506,8 @@ static int gfs2_dirent_gather(const struct gfs2_dirent *dent, * For now the most important thing is to check that the various sizes * are correct. */ -static int gfs2_check_dirent(struct gfs2_dirent *dent, unsigned int offset, +static int gfs2_check_dirent(struct gfs2_sbd *sdp, + struct gfs2_dirent *dent, unsigned int offset, unsigned int size, unsigned int len, int first) { const char *msg = "gfs2_dirent too small"; @@ -528,12 +529,12 @@ static int gfs2_check_dirent(struct gfs2_dirent *dent, unsigned int offset, goto error; return 0; error: - pr_warn("%s: %s (%s)\n", + fs_warn(sdp, "%s: %s (%s)\n", __func__, msg, first ? "first in block" : "not first in block"); return -EIO; } -static int gfs2_dirent_offset(const void *buf) +static int gfs2_dirent_offset(struct gfs2_sbd *sdp, const void *buf) { const struct gfs2_meta_header *h = buf; int offset; @@ -552,7 +553,8 @@ static int gfs2_dirent_offset(const void *buf) } return offset; wrong_type: - pr_warn("%s: wrong block type %u\n", __func__, be32_to_cpu(h->mh_type)); + fs_warn(sdp, "%s: wrong block type %u\n", __func__, + be32_to_cpu(h->mh_type)); return -1; } @@ -566,7 +568,7 @@ static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf, unsigned size; int ret = 0; - ret = gfs2_dirent_offset(buf); + ret = gfs2_dirent_offset(GFS2_SB(inode), buf); if (ret < 0) goto consist_inode; @@ -574,7 +576,7 @@ static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf, prev = NULL; dent = buf + offset; size = be16_to_cpu(dent->de_rec_len); - if (gfs2_check_dirent(dent, offset, size, len, 1)) + if (gfs2_check_dirent(GFS2_SB(inode), dent, offset, size, len, 1)) goto consist_inode; do { ret = scan(dent, name, opaque); @@ -586,7 +588,8 @@ static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf, prev = dent; dent = buf + offset; size = be16_to_cpu(dent->de_rec_len); - if (gfs2_check_dirent(dent, offset, size, len, 0)) + if (gfs2_check_dirent(GFS2_SB(inode), dent, offset, size, + len, 0)) goto consist_inode; } while(1); @@ -1043,7 +1046,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name) len = BIT(dip->i_depth - be16_to_cpu(oleaf->lf_depth)); half_len = len >> 1; if (!half_len) { - pr_warn("i_depth %u lf_depth %u index %u\n", + fs_warn(GFS2_SB(inode), "i_depth %u lf_depth %u index %u\n", dip->i_depth, be16_to_cpu(oleaf->lf_depth), index); gfs2_consist_inode(dip); error = -EIO; @@ -1351,7 +1354,7 @@ static int gfs2_set_cookies(struct gfs2_sbd *sdp, struct buffer_head *bh, if (!sdp->sd_args.ar_loccookie) continue; offset = (char *)(darr[i]) - - (bh->b_data + gfs2_dirent_offset(bh->b_data)); + (bh->b_data + gfs2_dirent_offset(sdp, 
bh->b_data)); offset /= GFS2_MIN_DIRENT_SIZE; offset += leaf_nr * sdp->sd_max_dents_per_leaf; if (offset >= GFS2_USE_HASH_FLAG || @@ -2018,7 +2021,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len, l_blocks++; } - gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE); + gfs2_rlist_alloc(&rlist); for (x = 0; x < rlist.rl_rgrps; x++) { struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl); @@ -2039,6 +2042,8 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len, bh = leaf_bh; for (blk = leaf_no; blk; blk = nblk) { + struct gfs2_rgrpd *rgd; + if (blk != leaf_no) { error = get_leaf(dip, blk, &bh); if (error) @@ -2049,7 +2054,8 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len, if (blk != leaf_no) brelse(bh); - gfs2_free_meta(dip, blk, 1); + rgd = gfs2_blk2rgrpd(sdp, blk, true); + gfs2_free_meta(dip, rgd, blk, 1); gfs2_add_inode_blocks(&dip->i_inode, -1); } diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 08369c6cd127..45a17b770d97 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -314,6 +314,17 @@ static int gfs2_set_flags(struct file *filp, u32 __user *ptr) return do_gfs2_set_flags(filp, gfsflags, mask); } +static int gfs2_getlabel(struct file *filp, char __user *label) +{ + struct inode *inode = file_inode(filp); + struct gfs2_sbd *sdp = GFS2_SB(inode); + + if (copy_to_user(label, sdp->sd_sb.sb_locktable, GFS2_LOCKNAME_LEN)) + return -EFAULT; + + return 0; +} + static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { switch(cmd) { @@ -323,7 +334,10 @@ static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return gfs2_set_flags(filp, (u32 __user *)arg); case FITRIM: return gfs2_fitrim(filp, (void __user *)arg); + case FS_IOC_GETFSLABEL: + return gfs2_getlabel(filp, (char __user *)arg); } + return -ENOTTY; } @@ -347,8 +361,8 @@ static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size) size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift; int hint = min_t(size_t, INT_MAX, blks); - if (hint > atomic_read(&ip->i_res.rs_sizehint)) - atomic_set(&ip->i_res.rs_sizehint, hint); + if (hint > atomic_read(&ip->i_sizehint)) + atomic_set(&ip->i_sizehint, hint); } /** diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 4614ee25f621..05431324b262 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -494,7 +494,8 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) do_xmote(gl, gh, LM_ST_UNLOCKED); break; default: /* Everything else */ - pr_err("wanted %u got %u\n", gl->gl_target, state); + fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n", + gl->gl_target, state); GLOCK_BUG_ON(gl, 1); } spin_unlock(&gl->gl_lockref.lock); @@ -577,7 +578,7 @@ __acquires(&gl->gl_lockref.lock) gfs2_glock_queue_work(gl, 0); } else if (ret) { - pr_err("lm_lock ret %d\n", ret); + fs_err(sdp, "lm_lock ret %d\n", ret); GLOCK_BUG_ON(gl, !test_bit(SDF_SHUTDOWN, &sdp->sd_flags)); } @@ -1064,13 +1065,13 @@ __acquires(&gl->gl_lockref.lock) return; trap_recursive: - pr_err("original: %pSR\n", (void *)gh2->gh_ip); - pr_err("pid: %d\n", pid_nr(gh2->gh_owner_pid)); - pr_err("lock type: %d req lock state : %d\n", + fs_err(sdp, "original: %pSR\n", (void *)gh2->gh_ip); + fs_err(sdp, "pid: %d\n", pid_nr(gh2->gh_owner_pid)); + fs_err(sdp, "lock type: %d req lock state : %d\n", gh2->gh_gl->gl_name.ln_type, gh2->gh_state); - pr_err("new: %pSR\n", (void *)gh->gh_ip); - pr_err("pid: %d\n", pid_nr(gh->gh_owner_pid)); - pr_err("lock type: %d req lock state : %d\n", + fs_err(sdp, "new: 
%pSR\n", (void *)gh->gh_ip); + fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid)); + fs_err(sdp, "lock type: %d req lock state : %d\n", gh->gh_gl->gl_name.ln_type, gh->gh_state); gfs2_dump_glock(NULL, gl); BUG(); diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index b96d39c28e17..888b62cfd6d1 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -92,7 +92,7 @@ struct gfs2_bitmap { unsigned long bi_flags; u32 bi_offset; u32 bi_start; - u32 bi_len; + u32 bi_bytes; u32 bi_blocks; }; @@ -309,10 +309,6 @@ struct gfs2_qadata { /* quota allocation data */ */ struct gfs2_blkreserv { - /* components used during write (step 1): */ - atomic_t rs_sizehint; /* hint of the write size */ - - struct gfs2_holder rs_rgd_gh; /* Filled in by get_local_rgrp */ struct rb_node rs_node; /* link to other block reservations */ struct gfs2_rbm rs_rbm; /* Start of reservation */ u32 rs_free; /* how many blocks are still free */ @@ -417,8 +413,10 @@ struct gfs2_inode { struct gfs2_holder i_iopen_gh; struct gfs2_holder i_gh; /* for prepare/commit_write only */ struct gfs2_qadata *i_qadata; /* quota allocation data */ + struct gfs2_holder i_rgd_gh; struct gfs2_blkreserv i_res; /* rgrp multi-block reservation */ u64 i_goal; /* goal block for allocations */ + atomic_t i_sizehint; /* hint of the write size */ struct rw_semaphore i_rw_mutex; struct list_head i_ordered; struct list_head i_trunc_list; @@ -623,6 +621,7 @@ enum { SDF_RORECOVERY = 7, /* read only recovery */ SDF_SKIP_DLM_UNLOCK = 8, SDF_FORCE_AIL_FLUSH = 9, + SDF_AIL1_IO_ERROR = 10, }; enum gfs2_freeze_state { diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c index ac7caa267ed6..31df26ed7854 100644 --- a/fs/gfs2/lock_dlm.c +++ b/fs/gfs2/lock_dlm.c @@ -177,14 +177,14 @@ static void gdlm_bast(void *arg, int mode) gfs2_glock_cb(gl, LM_ST_SHARED); break; default: - pr_err("unknown bast mode %d\n", mode); + fs_err(gl->gl_name.ln_sbd, "unknown bast mode %d\n", mode); BUG(); } } /* convert gfs lock-state to dlm lock-mode */ -static int make_mode(const unsigned int lmstate) +static int make_mode(struct gfs2_sbd *sdp, const unsigned int lmstate) { switch (lmstate) { case LM_ST_UNLOCKED: @@ -196,7 +196,7 @@ static int make_mode(const unsigned int lmstate) case LM_ST_SHARED: return DLM_LOCK_PR; } - pr_err("unknown LM state %d\n", lmstate); + fs_err(sdp, "unknown LM state %d\n", lmstate); BUG(); return -1; } @@ -257,7 +257,7 @@ static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state, u32 lkf; char strname[GDLM_STRNAME_BYTES] = ""; - req = make_mode(req_state); + req = make_mode(gl->gl_name.ln_sbd, req_state); lkf = make_flags(gl, flags, req); gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT); gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT); @@ -309,7 +309,7 @@ static void gdlm_put_lock(struct gfs2_glock *gl) error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK, NULL, gl); if (error) { - pr_err("gdlm_unlock %x,%llx err=%d\n", + fs_err(sdp, "gdlm_unlock %x,%llx err=%d\n", gl->gl_name.ln_type, (unsigned long long)gl->gl_name.ln_number, error); return; diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index ee20ea42e7b5..99dd58694ba1 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c @@ -108,7 +108,9 @@ __acquires(&sdp->sd_ail_lock) gfs2_assert(sdp, bd->bd_tr == tr); if (!buffer_busy(bh)) { - if (!buffer_uptodate(bh)) { + if (!buffer_uptodate(bh) && + !test_and_set_bit(SDF_AIL1_IO_ERROR, + &sdp->sd_flags)) { gfs2_io_error_bh(sdp, bh); *withdraw = true; } @@ -206,7 +208,8 @@ static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr, gfs2_assert(sdp, bd->bd_tr == 
tr); if (buffer_busy(bh)) continue; - if (!buffer_uptodate(bh)) { + if (!buffer_uptodate(bh) && + !test_and_set_bit(SDF_AIL1_IO_ERROR, &sdp->sd_flags)) { gfs2_io_error_bh(sdp, bh); *withdraw = true; } @@ -618,7 +621,7 @@ void gfs2_write_revokes(struct gfs2_sbd *sdp) gfs2_ail1_empty(sdp); spin_lock(&sdp->sd_ail_lock); - list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) { + list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) { list_for_each_entry(bd, &tr->tr_ail2_list, bd_ail_st_list) { if (list_empty(&bd->bd_list)) { have_revokes = 1; @@ -642,7 +645,7 @@ void gfs2_write_revokes(struct gfs2_sbd *sdp) } gfs2_log_lock(sdp); spin_lock(&sdp->sd_ail_lock); - list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) { + list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) { list_for_each_entry_safe(bd, tmp, &tr->tr_ail2_list, bd_ail_st_list) { if (max_revokes == 0) goto out_of_blocks; diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index f2567f958d00..4c7069b8f3c1 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c @@ -81,7 +81,7 @@ static void maybe_release_space(struct gfs2_bufdata *bd) if (sdp->sd_args.ar_discard) gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL); memcpy(bi->bi_clone + bi->bi_offset, - bd->bd_bh->b_data + bi->bi_offset, bi->bi_len); + bd->bd_bh->b_data + bi->bi_offset, bi->bi_bytes); clear_bit(GBF_FULL, &bi->bi_flags); rgd->rd_free_clone = rgd->rd_free; rgd->rd_extfail_pt = rgd->rd_free; diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index 2d55e2c3333c..c7603063f861 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c @@ -39,9 +39,11 @@ static void gfs2_init_inode_once(void *foo) struct gfs2_inode *ip = foo; inode_init_once(&ip->i_inode); + atomic_set(&ip->i_sizehint, 0); init_rwsem(&ip->i_rw_mutex); INIT_LIST_HEAD(&ip->i_trunc_list); ip->i_qadata = NULL; + gfs2_holder_mark_uninitialized(&ip->i_rgd_gh); memset(&ip->i_res, 0, sizeof(ip->i_res)); RB_CLEAR_NODE(&ip->i_res.rs_node); ip->i_hash_cache = NULL; diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 6b84ef6ccff3..b041cb8ae383 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -72,13 +72,13 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb) if (!sdp) return NULL; - sb->s_fs_info = sdp; sdp->sd_vfs = sb; sdp->sd_lkstats = alloc_percpu(struct gfs2_pcpu_lkstats); if (!sdp->sd_lkstats) { kfree(sdp); return NULL; } + sb->s_fs_info = sdp; set_bit(SDF_NOJOURNALID, &sdp->sd_flags); gfs2_tune_init(&sdp->sd_tune); diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index 0efae7a0ee80..2ae5a109eea7 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c @@ -1183,7 +1183,7 @@ static int print_message(struct gfs2_quota_data *qd, char *type) * * Returns: 0 on success. * min_req = ap->min_target ? ap->min_target : ap->target; - * quota must allow atleast min_req blks for success and + * quota must allow at least min_req blks for success and * ap->allowed is set to the number of blocks allowed * * -EDQUOT otherwise, quota violation. 
ap->allowed is set to number diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 1ad3256b9cbc..ffe3032b1043 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -90,7 +90,7 @@ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone, { unsigned char *byte1, *byte2, *end, cur_state; struct gfs2_bitmap *bi = rbm_bi(rbm); - unsigned int buflen = bi->bi_len; + unsigned int buflen = bi->bi_bytes; const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE; byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY); @@ -101,12 +101,16 @@ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone, cur_state = (*byte1 >> bit) & GFS2_BIT_MASK; if (unlikely(!valid_change[new_state * 4 + cur_state])) { - pr_warn("buf_blk = 0x%x old_state=%d, new_state=%d\n", + struct gfs2_sbd *sdp = rbm->rgd->rd_sbd; + + fs_warn(sdp, "buf_blk = 0x%x old_state=%d, new_state=%d\n", rbm->offset, cur_state, new_state); - pr_warn("rgrp=0x%llx bi_start=0x%x\n", - (unsigned long long)rbm->rgd->rd_addr, bi->bi_start); - pr_warn("bi_offset=0x%x bi_len=0x%x\n", - bi->bi_offset, bi->bi_len); + fs_warn(sdp, "rgrp=0x%llx bi_start=0x%x biblk: 0x%llx\n", + (unsigned long long)rbm->rgd->rd_addr, bi->bi_start, + (unsigned long long)bi->bi_bh->b_blocknr); + fs_warn(sdp, "bi_offset=0x%x bi_bytes=0x%x block=0x%llx\n", + bi->bi_offset, bi->bi_bytes, + (unsigned long long)gfs2_rbm_to_block(rbm)); dump_stack(); gfs2_consist_rgrpd(rbm->rgd); return; @@ -269,15 +273,10 @@ static u32 gfs2_bitfit(const u8 *buf, const unsigned int len, static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block) { - u64 rblock = block - rbm->rgd->rd_data0; - - if (WARN_ON_ONCE(rblock > UINT_MAX)) - return -EINVAL; - if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data) + if (!rgrp_contains_block(rbm->rgd, block)) return -E2BIG; - rbm->bii = 0; - rbm->offset = (u32)(rblock); + rbm->offset = block - rbm->rgd->rd_data0; /* Check if the block is within the first block */ if (rbm->offset < rbm_bi(rbm)->bi_blocks) return 0; @@ -382,7 +381,7 @@ static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len) if (bi->bi_clone) start = bi->bi_clone; start += bi->bi_offset; - end = start + bi->bi_len; + end = start + bi->bi_bytes; BUG_ON(rbm.offset & 3); start += (rbm.offset / GFS2_NBBY); bytes = min_t(u32, len / GFS2_NBBY, (end - start)); @@ -467,7 +466,7 @@ void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd) count[x] += gfs2_bitcount(rgd, bi->bi_bh->b_data + bi->bi_offset, - bi->bi_len, x); + bi->bi_bytes, x); } if (count[0] != rgd->rd_free) { @@ -642,7 +641,10 @@ static void __rs_deltree(struct gfs2_blkreserv *rs) RB_CLEAR_NODE(&rs->rs_node); if (rs->rs_free) { - struct gfs2_bitmap *bi = rbm_bi(&rs->rs_rbm); + u64 last_block = gfs2_rbm_to_block(&rs->rs_rbm) + + rs->rs_free - 1; + struct gfs2_rbm last_rbm = { .rgd = rs->rs_rbm.rgd, }; + struct gfs2_bitmap *start, *last; /* return reserved blocks to the rgrp */ BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free); @@ -653,7 +655,13 @@ static void __rs_deltree(struct gfs2_blkreserv *rs) it will force the number to be recalculated later. 
*/ rgd->rd_extfail_pt += rs->rs_free; rs->rs_free = 0; - clear_bit(GBF_FULL, &bi->bi_flags); + if (gfs2_rbm_from_block(&last_rbm, last_block)) + return; + start = rbm_bi(&rs->rs_rbm); + last = rbm_bi(&last_rbm); + do + clear_bit(GBF_FULL, &start->bi_flags); + while (start++ != last); } } @@ -738,11 +746,13 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp) static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd) { - pr_info("ri_addr = %llu\n", (unsigned long long)rgd->rd_addr); - pr_info("ri_length = %u\n", rgd->rd_length); - pr_info("ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0); - pr_info("ri_data = %u\n", rgd->rd_data); - pr_info("ri_bitbytes = %u\n", rgd->rd_bitbytes); + struct gfs2_sbd *sdp = rgd->rd_sbd; + + fs_info(sdp, "ri_addr = %llu\n", (unsigned long long)rgd->rd_addr); + fs_info(sdp, "ri_length = %u\n", rgd->rd_length); + fs_info(sdp, "ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0); + fs_info(sdp, "ri_data = %u\n", rgd->rd_data); + fs_info(sdp, "ri_bitbytes = %u\n", rgd->rd_bitbytes); } /** @@ -780,21 +790,21 @@ static int compute_bitstructs(struct gfs2_rgrpd *rgd) bytes = bytes_left; bi->bi_offset = sizeof(struct gfs2_rgrp); bi->bi_start = 0; - bi->bi_len = bytes; + bi->bi_bytes = bytes; bi->bi_blocks = bytes * GFS2_NBBY; /* header block */ } else if (x == 0) { bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp); bi->bi_offset = sizeof(struct gfs2_rgrp); bi->bi_start = 0; - bi->bi_len = bytes; + bi->bi_bytes = bytes; bi->bi_blocks = bytes * GFS2_NBBY; /* last block */ } else if (x + 1 == length) { bytes = bytes_left; bi->bi_offset = sizeof(struct gfs2_meta_header); bi->bi_start = rgd->rd_bitbytes - bytes_left; - bi->bi_len = bytes; + bi->bi_bytes = bytes; bi->bi_blocks = bytes * GFS2_NBBY; /* other blocks */ } else { @@ -802,7 +812,7 @@ static int compute_bitstructs(struct gfs2_rgrpd *rgd) sizeof(struct gfs2_meta_header); bi->bi_offset = sizeof(struct gfs2_meta_header); bi->bi_start = rgd->rd_bitbytes - bytes_left; - bi->bi_len = bytes; + bi->bi_bytes = bytes; bi->bi_blocks = bytes * GFS2_NBBY; } @@ -814,11 +824,11 @@ static int compute_bitstructs(struct gfs2_rgrpd *rgd) return -EIO; } bi = rgd->rd_bits + (length - 1); - if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) { + if ((bi->bi_start + bi->bi_bytes) * GFS2_NBBY != rgd->rd_data) { if (gfs2_consist_rgrpd(rgd)) { gfs2_rindex_print(rgd); fs_err(sdp, "start=%u len=%u offset=%u\n", - bi->bi_start, bi->bi_len, bi->bi_offset); + bi->bi_start, bi->bi_bytes, bi->bi_offset); } return -EIO; } @@ -1103,12 +1113,35 @@ static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd) { struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl; struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data; + int valid = 1; - if (rgl->rl_flags != str->rg_flags || rgl->rl_free != str->rg_free || - rgl->rl_dinodes != str->rg_dinodes || - rgl->rl_igeneration != str->rg_igeneration) - return 0; - return 1; + if (rgl->rl_flags != str->rg_flags) { + printk(KERN_WARNING "GFS2: rgd: %llu lvb flag mismatch %u/%u", + (unsigned long long)rgd->rd_addr, + be32_to_cpu(rgl->rl_flags), be32_to_cpu(str->rg_flags)); + valid = 0; + } + if (rgl->rl_free != str->rg_free) { + printk(KERN_WARNING "GFS2: rgd: %llu lvb free mismatch %u/%u", + (unsigned long long)rgd->rd_addr, + be32_to_cpu(rgl->rl_free), be32_to_cpu(str->rg_free)); + valid = 0; + } + if (rgl->rl_dinodes != str->rg_dinodes) { + printk(KERN_WARNING "GFS2: rgd: %llu lvb dinode mismatch %u/%u", + (unsigned long long)rgd->rd_addr, + be32_to_cpu(rgl->rl_dinodes), + 
be32_to_cpu(str->rg_dinodes)); + valid = 0; + } + if (rgl->rl_igeneration != str->rg_igeneration) { + printk(KERN_WARNING "GFS2: rgd: %llu lvb igen mismatch " + "%llu/%llu", (unsigned long long)rgd->rd_addr, + (unsigned long long)be64_to_cpu(rgl->rl_igeneration), + (unsigned long long)be64_to_cpu(str->rg_igeneration)); + valid = 0; + } + return valid; } static u32 count_unlinked(struct gfs2_rgrpd *rgd) @@ -1122,8 +1155,8 @@ static u32 count_unlinked(struct gfs2_rgrpd *rgd) goal = 0; buffer = bi->bi_bh->b_data + bi->bi_offset; WARN_ON(!buffer_uptodate(bi->bi_bh)); - while (goal < bi->bi_len * GFS2_NBBY) { - goal = gfs2_bitfit(buffer, bi->bi_len, goal, + while (goal < bi->bi_blocks) { + goal = gfs2_bitfit(buffer, bi->bi_bytes, goal, GFS2_BLKST_UNLINKED); if (goal == BFITNOENT) break; @@ -1226,7 +1259,7 @@ static int update_rgrp_lvb(struct gfs2_rgrpd *rgd) rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags); rl_flags &= ~GFS2_RDF_MASK; rgd->rd_flags &= GFS2_RDF_MASK; - rgd->rd_flags |= (rl_flags | GFS2_RDF_UPTODATE | GFS2_RDF_CHECK); + rgd->rd_flags |= (rl_flags | GFS2_RDF_CHECK); if (rgd->rd_rgl->rl_unlinked == 0) rgd->rd_flags &= ~GFS2_RDF_CHECK; rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free); @@ -1295,7 +1328,7 @@ int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset, u32 trimmed = 0; u8 diff; - for (x = 0; x < bi->bi_len; x++) { + for (x = 0; x < bi->bi_bytes; x++) { const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data; clone += bi->bi_offset; clone += x; @@ -1541,8 +1574,8 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip, if (S_ISDIR(inode->i_mode)) extlen = 1; else { - extlen = max_t(u32, atomic_read(&rs->rs_sizehint), ap->target); - extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks); + extlen = max_t(u32, atomic_read(&ip->i_sizehint), ap->target); + extlen = clamp(extlen, (u32)RGRP_RSRV_MINBLKS, free_blocks); } if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen)) return; @@ -1728,7 +1761,7 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext, if (state != GFS2_BLKST_UNLINKED && bi->bi_clone) buffer = bi->bi_clone + bi->bi_offset; initial_offset = rbm->offset; - offset = gfs2_bitfit(buffer, bi->bi_len, rbm->offset, state); + offset = gfs2_bitfit(buffer, bi->bi_bytes, rbm->offset, state); if (offset == BFITNOENT) goto bitmap_full; rbm->offset = offset; @@ -1999,7 +2032,7 @@ static inline int fast_to_acquire(struct gfs2_rgrpd *rgd) * We try our best to find an rgrp that has at least ap->target blocks * available. After a couple of passes (loops == 2), the prospects of finding * such an rgrp diminish. At this stage, we return the first rgrp that has - * atleast ap->min_target blocks available. Either way, we set ap->allowed to + * at least ap->min_target blocks available. Either way, we set ap->allowed to * the number of blocks available in the chosen rgrp. 
* * Returns: 0 on success, @@ -2053,7 +2086,7 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap) } error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl, LM_ST_EXCLUSIVE, flags, - &rs->rs_rgd_gh); + &ip->i_rgd_gh); if (unlikely(error)) return error; if (!gfs2_rs_active(rs) && (loops < 2) && @@ -2062,13 +2095,13 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap) if (sdp->sd_args.ar_rgrplvb) { error = update_rgrp_lvb(rs->rs_rbm.rgd); if (unlikely(error)) { - gfs2_glock_dq_uninit(&rs->rs_rgd_gh); + gfs2_glock_dq_uninit(&ip->i_rgd_gh); return error; } } } - /* Skip unuseable resource groups */ + /* Skip unusable resource groups */ if ((rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC | GFS2_RDF_ERROR)) || (loops == 0 && ap->target > rs->rs_rbm.rgd->rd_extfail_pt)) @@ -2105,7 +2138,7 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap) /* Unlock rgrp if required */ if (!rg_locked) - gfs2_glock_dq_uninit(&rs->rs_rgd_gh); + gfs2_glock_dq_uninit(&ip->i_rgd_gh); next_rgrp: /* Find the next rgrp, and continue looking */ if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin)) @@ -2142,10 +2175,8 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap) void gfs2_inplace_release(struct gfs2_inode *ip) { - struct gfs2_blkreserv *rs = &ip->i_res; - - if (gfs2_holder_initialized(&rs->rs_rgd_gh)) - gfs2_glock_dq_uninit(&rs->rs_rgd_gh); + if (gfs2_holder_initialized(&ip->i_rgd_gh)) + gfs2_glock_dq_uninit(&ip->i_rgd_gh); } /** @@ -2184,27 +2215,21 @@ static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode, /** * rgblk_free - Change alloc state of given block(s) * @sdp: the filesystem + * @rgd: the resource group the blocks are in * @bstart: the start of a run of blocks to free * @blen: the length of the block run (all must lie within ONE RG!) 
* @new_state: GFS2_BLKST_XXX the after-allocation block state - * - * Returns: Resource group containing the block(s) */ -static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart, - u32 blen, unsigned char new_state) +static void rgblk_free(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd, + u64 bstart, u32 blen, unsigned char new_state) { struct gfs2_rbm rbm; struct gfs2_bitmap *bi, *bi_prev = NULL; - rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1); - if (!rbm.rgd) { - if (gfs2_consist(sdp)) - fs_err(sdp, "block = %llu\n", (unsigned long long)bstart); - return NULL; - } - - gfs2_rbm_from_block(&rbm, bstart); + rbm.rgd = rgd; + if (WARN_ON_ONCE(gfs2_rbm_from_block(&rbm, bstart))) + return; while (blen--) { bi = rbm_bi(&rbm); if (bi != bi_prev) { @@ -2213,7 +2238,7 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart, GFP_NOFS | __GFP_NOFAIL); memcpy(bi->bi_clone + bi->bi_offset, bi->bi_bh->b_data + bi->bi_offset, - bi->bi_len); + bi->bi_bytes); } gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh); bi_prev = bi; @@ -2221,8 +2246,6 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart, gfs2_setbit(&rbm, false, new_state); gfs2_rbm_incr(&rbm); } - - return rbm.rgd; } /** @@ -2244,6 +2267,14 @@ void gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl) (unsigned long long)rgd->rd_addr, rgd->rd_flags, rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes, rgd->rd_reserved, rgd->rd_extfail_pt); + if (rgd->rd_sbd->sd_args.ar_rgrplvb) { + struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl; + + gfs2_print_dbg(seq, " L: f:%02x b:%u i:%u\n", + be32_to_cpu(rgl->rl_flags), + be32_to_cpu(rgl->rl_free), + be32_to_cpu(rgl->rl_dinodes)); + } spin_lock(&rgd->rd_rsspin); for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) { trs = rb_entry(n, struct gfs2_blkreserv, rs_node); @@ -2295,7 +2326,7 @@ static void gfs2_adjust_reservation(struct gfs2_inode *ip, goto out; /* We used up our block reservation, so we should reserve more blocks next time. 
*/ - atomic_add(RGRP_RSRV_ADDBLKS, &rs->rs_sizehint); + atomic_add(RGRP_RSRV_ADDBLKS, &ip->i_sizehint); } __rs_deltree(rs); } @@ -2329,7 +2360,10 @@ static void gfs2_set_alloc_start(struct gfs2_rbm *rbm, else goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0; - gfs2_rbm_from_block(rbm, goal); + if (WARN_ON_ONCE(gfs2_rbm_from_block(rbm, goal))) { + rbm->bii = 0; + rbm->offset = 0; + } } /** @@ -2392,7 +2426,7 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks, } } if (rbm.rgd->rd_free < *nblocks) { - pr_warn("nblocks=%u\n", *nblocks); + fs_warn(sdp, "nblocks=%u\n", *nblocks); goto rgrp_error; } @@ -2427,20 +2461,19 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks, /** * __gfs2_free_blocks - free a contiguous run of block(s) * @ip: the inode these blocks are being freed from + * @rgd: the resource group the blocks are in * @bstart: first block of a run of contiguous blocks * @blen: the length of the block run * @meta: 1 if the blocks represent metadata * */ -void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta) +void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd, + u64 bstart, u32 blen, int meta) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); - struct gfs2_rgrpd *rgd; - rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE); - if (!rgd) - return; + rgblk_free(sdp, rgd, bstart, blen, GFS2_BLKST_FREE); trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE); rgd->rd_free += blen; rgd->rd_flags &= ~GFS2_RGF_TRIMMED; @@ -2455,16 +2488,18 @@ void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta) /** * gfs2_free_meta - free a contiguous run of data block(s) * @ip: the inode these blocks are being freed from + * @rgd: the resource group the blocks are in * @bstart: first block of a run of contiguous blocks * @blen: the length of the block run * */ -void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen) +void gfs2_free_meta(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd, + u64 bstart, u32 blen) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); - __gfs2_free_blocks(ip, bstart, blen, 1); + __gfs2_free_blocks(ip, rgd, bstart, blen, 1); gfs2_statfs_change(sdp, 0, +blen, 0); gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid); } @@ -2476,9 +2511,10 @@ void gfs2_unlink_di(struct inode *inode) struct gfs2_rgrpd *rgd; u64 blkno = ip->i_no_addr; - rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED); + rgd = gfs2_blk2rgrpd(sdp, blkno, true); if (!rgd) return; + rgblk_free(sdp, rgd, blkno, 1, GFS2_BLKST_UNLINKED); trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED); gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh); gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); @@ -2488,13 +2524,8 @@ void gfs2_unlink_di(struct inode *inode) void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip) { struct gfs2_sbd *sdp = rgd->rd_sbd; - struct gfs2_rgrpd *tmp_rgd; - - tmp_rgd = rgblk_free(sdp, ip->i_no_addr, 1, GFS2_BLKST_FREE); - if (!tmp_rgd) - return; - gfs2_assert_withdraw(sdp, rgd == tmp_rgd); + rgblk_free(sdp, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE); if (!rgd->rd_dinodes) gfs2_consist_rgrpd(rgd); rgd->rd_dinodes--; @@ -2538,7 +2569,8 @@ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type) rbm.rgd = rgd; error = gfs2_rbm_from_block(&rbm, no_addr); - WARN_ON_ONCE(error != 0); + if (WARN_ON_ONCE(error)) + goto fail; if (gfs2_testbit(&rbm, false) != type) error = -ESTALE; @@ -2624,13 +2656,12 @@ void 
gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist, * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate * and initialize an array of glock holders for them * @rlist: the list of resource groups - * @state: the lock state to acquire the RG lock in * * FIXME: Don't use NOFAIL * */ -void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state) +void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist) { unsigned int x; @@ -2639,7 +2670,7 @@ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state) GFP_NOFS | __GFP_NOFAIL); for (x = 0; x < rlist->rl_rgrps; x++) gfs2_holder_init(rlist->rl_rgd[x]->rd_gl, - state, 0, + LM_ST_EXCLUSIVE, 0, &rlist->rl_ghs[x]); } diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h index e90478e2f545..b596c3d17988 100644 --- a/fs/gfs2/rgrp.h +++ b/fs/gfs2/rgrp.h @@ -18,8 +18,7 @@ * By reserving 32 blocks at a time, we can optimize / shortcut how we search * through the bitmaps by looking a word at a time. */ -#define RGRP_RSRV_MINBYTES 8 -#define RGRP_RSRV_MINBLKS ((u32)(RGRP_RSRV_MINBYTES * GFS2_NBBY)) +#define RGRP_RSRV_MINBLKS 32 #define RGRP_RSRV_ADDBLKS 64 struct gfs2_rgrpd; @@ -52,8 +51,10 @@ extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n, extern int gfs2_rsqa_alloc(struct gfs2_inode *ip); extern void gfs2_rs_deltree(struct gfs2_blkreserv *rs); extern void gfs2_rsqa_delete(struct gfs2_inode *ip, atomic_t *wcount); -extern void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta); -extern void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen); +extern void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd, + u64 bstart, u32 blen, int meta); +extern void gfs2_free_meta(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd, + u64 bstart, u32 blen); extern void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip); extern void gfs2_unlink_di(struct inode *inode); extern int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, @@ -68,7 +69,7 @@ struct gfs2_rgrp_list { extern void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist, u64 block); -extern void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state); +extern void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist); extern void gfs2_rlist_free(struct gfs2_rgrp_list *rlist); extern u64 gfs2_ri_total(struct gfs2_sbd *sdp); extern void gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl); diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index c212893534ed..ca71163ff7cf 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -854,10 +854,10 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp) if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) return error; + flush_workqueue(gfs2_delete_workqueue); kthread_stop(sdp->sd_quotad_process); kthread_stop(sdp->sd_logd_process); - flush_workqueue(gfs2_delete_workqueue); gfs2_quota_sync(sdp->sd_vfs, 0); gfs2_statfs_sync(sdp->sd_vfs, 0); @@ -971,7 +971,7 @@ void gfs2_freeze_func(struct work_struct *work) error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0, &freeze_gh); if (error) { - printk(KERN_INFO "GFS2: couln't get freeze lock : %d\n", error); + printk(KERN_INFO "GFS2: couldn't get freeze lock : %d\n", error); gfs2_assert_withdraw(sdp, 0); } else { diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c index 064c9a0ef046..423bc2d03dd8 100644 --- a/fs/gfs2/trans.c +++ b/fs/gfs2/trans.c @@ -74,13 +74,13 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks, return error; } -static void 
gfs2_print_trans(const struct gfs2_trans *tr) +static void gfs2_print_trans(struct gfs2_sbd *sdp, const struct gfs2_trans *tr) { - pr_warn("Transaction created at: %pSR\n", (void *)tr->tr_ip); - pr_warn("blocks=%u revokes=%u reserved=%u touched=%u\n", + fs_warn(sdp, "Transaction created at: %pSR\n", (void *)tr->tr_ip); + fs_warn(sdp, "blocks=%u revokes=%u reserved=%u touched=%u\n", tr->tr_blocks, tr->tr_revokes, tr->tr_reserved, test_bit(TR_TOUCHED, &tr->tr_flags)); - pr_warn("Buf %u/%u Databuf %u/%u Revoke %u/%u\n", + fs_warn(sdp, "Buf %u/%u Databuf %u/%u Revoke %u/%u\n", tr->tr_num_buf_new, tr->tr_num_buf_rm, tr->tr_num_databuf_new, tr->tr_num_databuf_rm, tr->tr_num_revoke, tr->tr_num_revoke_rm); @@ -109,7 +109,7 @@ void gfs2_trans_end(struct gfs2_sbd *sdp) if (gfs2_assert_withdraw(sdp, (nbuf <= tr->tr_blocks) && (tr->tr_num_revoke <= tr->tr_revokes))) - gfs2_print_trans(tr); + gfs2_print_trans(sdp, tr); gfs2_log_commit(sdp, tr); if (alloced && !test_bit(TR_ATTACHED, &tr->tr_flags)) @@ -225,12 +225,13 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh) set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags); mh = (struct gfs2_meta_header *)bd->bd_bh->b_data; if (unlikely(mh->mh_magic != cpu_to_be32(GFS2_MAGIC))) { - pr_err("Attempting to add uninitialised block to journal (inplace block=%lld)\n", + fs_err(sdp, "Attempting to add uninitialised block to " + "journal (inplace block=%lld)\n", (unsigned long long)bd->bd_bh->b_blocknr); BUG(); } if (unlikely(state == SFS_FROZEN)) { - printk(KERN_INFO "GFS2:adding buf while frozen\n"); + fs_info(sdp, "GFS2:adding buf while frozen\n"); gfs2_assert_withdraw(sdp, 0); } gfs2_pin(sdp, bd->bd_bh); diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c index 59c811de0dc7..0a814ccac41d 100644 --- a/fs/gfs2/util.c +++ b/fs/gfs2/util.c @@ -19,6 +19,7 @@ #include "gfs2.h" #include "incore.h" #include "glock.h" +#include "rgrp.h" #include "util.h" struct kmem_cache *gfs2_glock_cachep __read_mostly; @@ -181,6 +182,8 @@ int gfs2_consist_rgrpd_i(struct gfs2_rgrpd *rgd, int cluster_wide, { struct gfs2_sbd *sdp = rgd->rd_sbd; int rv; + + gfs2_rgrp_dump(NULL, rgd->rd_gl); rv = gfs2_lm_withdraw(sdp, "fatal: filesystem consistency error\n" " RG = %llu\n" @@ -256,12 +259,13 @@ void gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh, const char *function, char *file, unsigned int line, bool withdraw) { - fs_err(sdp, - "fatal: I/O error\n" - " block = %llu\n" - " function = %s, file = %s, line = %u\n", - (unsigned long long)bh->b_blocknr, - function, file, line); + if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) + fs_err(sdp, + "fatal: I/O error\n" + " block = %llu\n" + " function = %s, file = %s, line = %u\n", + (unsigned long long)bh->b_blocknr, + function, file, line); if (withdraw) gfs2_lm_withdraw(sdp, NULL); } diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h index 96ac4aba4738..9278fecba632 100644 --- a/fs/gfs2/util.h +++ b/fs/gfs2/util.h @@ -86,7 +86,7 @@ static inline int gfs2_meta_check(struct gfs2_sbd *sdp, struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data; u32 magic = be32_to_cpu(mh->mh_magic); if (unlikely(magic != GFS2_MAGIC)) { - pr_err("Magic number missing at %llu\n", + fs_err(sdp, "Magic number missing at %llu\n", (unsigned long long)bh->b_blocknr); return -EIO; } diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c index 38515988aaf7..996c915a9c97 100644 --- a/fs/gfs2/xattr.c +++ b/fs/gfs2/xattr.c @@ -283,7 +283,7 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh, blen++; else { if (bstart) - 
gfs2_free_meta(ip, bstart, blen); + gfs2_free_meta(ip, rgd, bstart, blen); bstart = bn; blen = 1; } @@ -292,7 +292,7 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh, gfs2_add_inode_blocks(&ip->i_inode, -1); } if (bstart) - gfs2_free_meta(ip, bstart, blen); + gfs2_free_meta(ip, rgd, bstart, blen); if (prev && !leave) { u32 len; @@ -1250,6 +1250,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct gfs2_rgrp_list rlist; + struct gfs2_rgrpd *rgd; struct buffer_head *indbh, *dibh; __be64 *eablk, *end; unsigned int rg_blocks = 0; @@ -1299,11 +1300,10 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip) else goto out; - gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE); + gfs2_rlist_alloc(&rlist); for (x = 0; x < rlist.rl_rgrps; x++) { - struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl); - + rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl); rg_blocks += rgd->rd_length; } @@ -1320,6 +1320,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip) eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header)); bstart = 0; + rgd = NULL; blen = 0; for (; eablk < end; eablk++) { @@ -1333,8 +1334,9 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip) blen++; else { if (bstart) - gfs2_free_meta(ip, bstart, blen); + gfs2_free_meta(ip, rgd, bstart, blen); bstart = bn; + rgd = gfs2_blk2rgrpd(sdp, bstart, true); blen = 1; } @@ -1342,7 +1344,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip) gfs2_add_inode_blocks(&ip->i_inode, -1); } if (bstart) - gfs2_free_meta(ip, bstart, blen); + gfs2_free_meta(ip, rgd, bstart, blen); ip->i_diskflags &= ~GFS2_DIF_EA_INDIRECT; @@ -1391,7 +1393,7 @@ static int ea_dealloc_block(struct gfs2_inode *ip) if (error) goto out_gunlock; - gfs2_free_meta(ip, ip->i_eattr, 1); + gfs2_free_meta(ip, rgd, ip->i_eattr, 1); ip->i_eattr = 0; gfs2_add_inode_blocks(&ip->i_inode, -1);
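
Note (illustrative, not part of the patch): the recurring pattern across these hunks is to resolve the resource group once with gfs2_blk2rgrpd() and pass it down to the freeing helpers, whose signatures now take the rgrp explicitly (__gfs2_free_blocks(), gfs2_free_meta(), rgblk_free()), presumably so callers that already hold or know the rgrp (e.g. sweep_bh_for_rgrps() via rd_gh) do not repeat the lookup. A minimal caller-side sketch of the new convention follows; the helper name free_meta_run() is hypothetical, and the gfs2 internal headers are assumed to provide the declarations changed above.

/* Sketch only: free a contiguous run of metadata blocks using the
 * post-patch gfs2_free_meta() signature, which takes the resource
 * group explicitly. All blen blocks must lie within one rgrp.
 */
#include "incore.h"   /* struct gfs2_inode, struct gfs2_rgrpd, GFS2_SB() */
#include "inode.h"    /* gfs2_add_inode_blocks() */
#include "rgrp.h"     /* gfs2_blk2rgrpd(), gfs2_free_meta() */

static void free_meta_run(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;

	/* Look the rgrp up once; the freeing helpers no longer do it. */
	rgd = gfs2_blk2rgrpd(sdp, bstart, true);
	if (!rgd)
		return;

	gfs2_free_meta(ip, rgd, bstart, blen);
	gfs2_add_inode_blocks(&ip->i_inode, -(s64)blen);
}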