xfs: shorten xfs_scrub_ prefix

Shorten all the metadata-checking xfs_scrub_ prefixes to xchk_.  After
this, the only xfs_scrub* symbols are the ones that pertain to both
scrub and repair.  Whitespace damage will be fixed in a subsequent
patch.  There are no functional changes.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Author: Darrick J. Wong
Date:   2018-07-19 12:29:11 -07:00
Commit: c517b3aa02 (parent: ef97ef26d2)
24 changed files with 1196 additions and 1190 deletions
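
The rename is mechanical: every metadata-checking xfs_scrub_* identifier becomes xchk_*, while symbols shared between scrub and repair (for example struct xfs_scrub_context, which is visible unchanged throughout the diffs below) keep the long prefix. As a rough illustration of how such a tree-wide rename can be scripted, here is a minimal Python sketch; the KEEP list and the fs/xfs/scrub/*.[ch] scope are assumptions made for the example, and the patch itself was not necessarily generated this way.

    # Illustrative only: shorten xfs_scrub_* checker names to xchk_*,
    # leaving the symbols shared with repair untouched.
    import re
    from pathlib import Path

    # Shared scrub/repair symbols keep the long prefix; this list is an
    # example, not the exact set preserved by the patch.
    KEEP = ("xfs_scrub_context", "xfs_scrub_metadata")

    def shorten(text):
        def repl(m):
            ident = m.group(0)
            if ident.startswith(KEEP):
                return ident                # shared symbol: leave as-is
            return "xchk_" + ident[len("xfs_scrub_"):]
        return re.sub(r"\bxfs_scrub_\w+", repl, text)

    for src in Path("fs/xfs/scrub").glob("*.[ch]"):
        src.write_text(shorten(src.read_text()))

A blind substitution like this shortens identifiers without re-aligning the surrounding whitespace, which matches the commit message's note that whitespace damage will be fixed in a subsequent patch.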

fs/xfs/scrub/agheader.c

@ -28,7 +28,7 @@
/* Cross-reference with the other btrees. */ /* Cross-reference with the other btrees. */
STATIC void STATIC void
xfs_scrub_superblock_xref( xchk_superblock_xref(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_buf *bp) struct xfs_buf *bp)
{ {
@ -43,15 +43,15 @@ xfs_scrub_superblock_xref(
agbno = XFS_SB_BLOCK(mp); agbno = XFS_SB_BLOCK(mp);
error = xfs_scrub_ag_init(sc, agno, &sc->sa); error = xchk_ag_init(sc, agno, &sc->sa);
if (!xfs_scrub_xref_process_error(sc, agno, agbno, &error)) if (!xchk_xref_process_error(sc, agno, agbno, &error))
return; return;
xfs_scrub_xref_is_used_space(sc, agbno, 1); xchk_xref_is_used_space(sc, agbno, 1);
xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1); xchk_xref_is_not_inode_chunk(sc, agbno, 1);
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS); xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo); xchk_xref_is_owned_by(sc, agbno, 1, &oinfo);
xfs_scrub_xref_is_not_shared(sc, agbno, 1); xchk_xref_is_not_shared(sc, agbno, 1);
/* scrub teardown will take care of sc->sa for us */ /* scrub teardown will take care of sc->sa for us */
} }
@ -65,7 +65,7 @@ xfs_scrub_superblock_xref(
* sb 0 is ok and we can use its information to check everything else. * sb 0 is ok and we can use its information to check everything else.
*/ */
int int
xfs_scrub_superblock( xchk_superblock(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
struct xfs_mount *mp = sc->mp; struct xfs_mount *mp = sc->mp;
@ -98,7 +98,7 @@ xfs_scrub_superblock(
default: default:
break; break;
} }
if (!xfs_scrub_process_error(sc, agno, XFS_SB_BLOCK(mp), &error)) if (!xchk_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
return error; return error;
sb = XFS_BUF_TO_SBP(bp); sb = XFS_BUF_TO_SBP(bp);
@ -110,46 +110,46 @@ xfs_scrub_superblock(
* checked. * checked.
*/ */
if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize)) if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize))
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks)) if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks))
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks)) if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks))
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents)) if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents))
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid)) if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid))
xfs_scrub_block_set_preen(sc, bp); xchk_block_set_preen(sc, bp);
if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart)) if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart))
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino)) if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino))
xfs_scrub_block_set_preen(sc, bp); xchk_block_set_preen(sc, bp);
if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino)) if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino))
xfs_scrub_block_set_preen(sc, bp); xchk_block_set_preen(sc, bp);
if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino)) if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino))
xfs_scrub_block_set_preen(sc, bp); xchk_block_set_preen(sc, bp);
if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize)) if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize))
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks)) if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks))
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount)) if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount))
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks)) if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks))
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks)) if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks))
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
/* Check sb_versionnum bits that are set at mkfs time. */ /* Check sb_versionnum bits that are set at mkfs time. */
vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS | vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS |
@ -163,7 +163,7 @@ xfs_scrub_superblock(
XFS_SB_VERSION_DIRV2BIT); XFS_SB_VERSION_DIRV2BIT);
if ((sb->sb_versionnum & vernum_mask) != if ((sb->sb_versionnum & vernum_mask) !=
(cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask)) (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
/* Check sb_versionnum bits that can be set after mkfs time. */ /* Check sb_versionnum bits that can be set after mkfs time. */
vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT | vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT |
@ -171,40 +171,40 @@ xfs_scrub_superblock(
XFS_SB_VERSION_QUOTABIT); XFS_SB_VERSION_QUOTABIT);
if ((sb->sb_versionnum & vernum_mask) != if ((sb->sb_versionnum & vernum_mask) !=
(cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask)) (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
xfs_scrub_block_set_preen(sc, bp); xchk_block_set_preen(sc, bp);
if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize)) if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize))
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize)) if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize))
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock)) if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock))
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname))) if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname)))
xfs_scrub_block_set_preen(sc, bp); xchk_block_set_preen(sc, bp);
if (sb->sb_blocklog != mp->m_sb.sb_blocklog) if (sb->sb_blocklog != mp->m_sb.sb_blocklog)
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
if (sb->sb_sectlog != mp->m_sb.sb_sectlog) if (sb->sb_sectlog != mp->m_sb.sb_sectlog)
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
if (sb->sb_inodelog != mp->m_sb.sb_inodelog) if (sb->sb_inodelog != mp->m_sb.sb_inodelog)
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
if (sb->sb_inopblog != mp->m_sb.sb_inopblog) if (sb->sb_inopblog != mp->m_sb.sb_inopblog)
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
if (sb->sb_agblklog != mp->m_sb.sb_agblklog) if (sb->sb_agblklog != mp->m_sb.sb_agblklog)
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
if (sb->sb_rextslog != mp->m_sb.sb_rextslog) if (sb->sb_rextslog != mp->m_sb.sb_rextslog)
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct) if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct)
xfs_scrub_block_set_preen(sc, bp); xchk_block_set_preen(sc, bp);
/* /*
* Skip the summary counters since we track them in memory anyway. * Skip the summary counters since we track them in memory anyway.
@ -212,10 +212,10 @@ xfs_scrub_superblock(
*/ */
if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino)) if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino))
xfs_scrub_block_set_preen(sc, bp); xchk_block_set_preen(sc, bp);
if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino)) if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino))
xfs_scrub_block_set_preen(sc, bp); xchk_block_set_preen(sc, bp);
/* /*
* Skip the quota flags since repair will force quotacheck. * Skip the quota flags since repair will force quotacheck.
@ -223,46 +223,46 @@ xfs_scrub_superblock(
*/ */
if (sb->sb_flags != mp->m_sb.sb_flags) if (sb->sb_flags != mp->m_sb.sb_flags)
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn) if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn)
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt)) if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt))
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit)) if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit))
xfs_scrub_block_set_preen(sc, bp); xchk_block_set_preen(sc, bp);
if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width)) if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width))
xfs_scrub_block_set_preen(sc, bp); xchk_block_set_preen(sc, bp);
if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog) if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog)
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog) if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog)
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize)) if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize))
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit)) if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit))
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
/* Do we see any invalid bits in sb_features2? */ /* Do we see any invalid bits in sb_features2? */
if (!xfs_sb_version_hasmorebits(&mp->m_sb)) { if (!xfs_sb_version_hasmorebits(&mp->m_sb)) {
if (sb->sb_features2 != 0) if (sb->sb_features2 != 0)
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
} else { } else {
v2_ok = XFS_SB_VERSION2_OKBITS; v2_ok = XFS_SB_VERSION2_OKBITS;
if (XFS_SB_VERSION_NUM(&mp->m_sb) >= XFS_SB_VERSION_5) if (XFS_SB_VERSION_NUM(&mp->m_sb) >= XFS_SB_VERSION_5)
v2_ok |= XFS_SB_VERSION2_CRCBIT; v2_ok |= XFS_SB_VERSION2_CRCBIT;
if (!!(sb->sb_features2 & cpu_to_be32(~v2_ok))) if (!!(sb->sb_features2 & cpu_to_be32(~v2_ok)))
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
if (sb->sb_features2 != sb->sb_bad_features2) if (sb->sb_features2 != sb->sb_bad_features2)
xfs_scrub_block_set_preen(sc, bp); xchk_block_set_preen(sc, bp);
} }
/* Check sb_features2 flags that are set at mkfs time. */ /* Check sb_features2 flags that are set at mkfs time. */
@ -272,26 +272,26 @@ xfs_scrub_superblock(
XFS_SB_VERSION2_FTYPE); XFS_SB_VERSION2_FTYPE);
if ((sb->sb_features2 & features_mask) != if ((sb->sb_features2 & features_mask) !=
(cpu_to_be32(mp->m_sb.sb_features2) & features_mask)) (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
/* Check sb_features2 flags that can be set after mkfs time. */ /* Check sb_features2 flags that can be set after mkfs time. */
features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT); features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT);
if ((sb->sb_features2 & features_mask) != if ((sb->sb_features2 & features_mask) !=
(cpu_to_be32(mp->m_sb.sb_features2) & features_mask)) (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
if (!xfs_sb_version_hascrc(&mp->m_sb)) { if (!xfs_sb_version_hascrc(&mp->m_sb)) {
/* all v5 fields must be zero */ /* all v5 fields must be zero */
if (memchr_inv(&sb->sb_features_compat, 0, if (memchr_inv(&sb->sb_features_compat, 0,
sizeof(struct xfs_dsb) - sizeof(struct xfs_dsb) -
offsetof(struct xfs_dsb, sb_features_compat))) offsetof(struct xfs_dsb, sb_features_compat)))
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
} else { } else {
/* Check compat flags; all are set at mkfs time. */ /* Check compat flags; all are set at mkfs time. */
features_mask = cpu_to_be32(XFS_SB_FEAT_COMPAT_UNKNOWN); features_mask = cpu_to_be32(XFS_SB_FEAT_COMPAT_UNKNOWN);
if ((sb->sb_features_compat & features_mask) != if ((sb->sb_features_compat & features_mask) !=
(cpu_to_be32(mp->m_sb.sb_features_compat) & features_mask)) (cpu_to_be32(mp->m_sb.sb_features_compat) & features_mask))
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
/* Check ro compat flags; all are set at mkfs time. */ /* Check ro compat flags; all are set at mkfs time. */
features_mask = cpu_to_be32(XFS_SB_FEAT_RO_COMPAT_UNKNOWN | features_mask = cpu_to_be32(XFS_SB_FEAT_RO_COMPAT_UNKNOWN |
@ -301,7 +301,7 @@ xfs_scrub_superblock(
if ((sb->sb_features_ro_compat & features_mask) != if ((sb->sb_features_ro_compat & features_mask) !=
(cpu_to_be32(mp->m_sb.sb_features_ro_compat) & (cpu_to_be32(mp->m_sb.sb_features_ro_compat) &
features_mask)) features_mask))
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
/* Check incompat flags; all are set at mkfs time. */ /* Check incompat flags; all are set at mkfs time. */
features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_UNKNOWN | features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_UNKNOWN |
@ -311,22 +311,22 @@ xfs_scrub_superblock(
if ((sb->sb_features_incompat & features_mask) != if ((sb->sb_features_incompat & features_mask) !=
(cpu_to_be32(mp->m_sb.sb_features_incompat) & (cpu_to_be32(mp->m_sb.sb_features_incompat) &
features_mask)) features_mask))
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
/* Check log incompat flags; all are set at mkfs time. */ /* Check log incompat flags; all are set at mkfs time. */
features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN); features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN);
if ((sb->sb_features_log_incompat & features_mask) != if ((sb->sb_features_log_incompat & features_mask) !=
(cpu_to_be32(mp->m_sb.sb_features_log_incompat) & (cpu_to_be32(mp->m_sb.sb_features_log_incompat) &
features_mask)) features_mask))
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
/* Don't care about sb_crc */ /* Don't care about sb_crc */
if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align)) if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align))
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino)) if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino))
xfs_scrub_block_set_preen(sc, bp); xchk_block_set_preen(sc, bp);
/* Don't care about sb_lsn */ /* Don't care about sb_lsn */
} }
@ -334,15 +334,15 @@ xfs_scrub_superblock(
if (xfs_sb_version_hasmetauuid(&mp->m_sb)) { if (xfs_sb_version_hasmetauuid(&mp->m_sb)) {
/* The metadata UUID must be the same for all supers */ /* The metadata UUID must be the same for all supers */
if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid)) if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid))
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
} }
/* Everything else must be zero. */ /* Everything else must be zero. */
if (memchr_inv(sb + 1, 0, if (memchr_inv(sb + 1, 0,
BBTOB(bp->b_length) - sizeof(struct xfs_dsb))) BBTOB(bp->b_length) - sizeof(struct xfs_dsb)))
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
xfs_scrub_superblock_xref(sc, bp); xchk_superblock_xref(sc, bp);
return error; return error;
} }
@ -351,7 +351,7 @@ xfs_scrub_superblock(
/* Tally freespace record lengths. */ /* Tally freespace record lengths. */
STATIC int STATIC int
xfs_scrub_agf_record_bno_lengths( xchk_agf_record_bno_lengths(
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
struct xfs_alloc_rec_incore *rec, struct xfs_alloc_rec_incore *rec,
void *priv) void *priv)
@ -364,7 +364,7 @@ xfs_scrub_agf_record_bno_lengths(
/* Check agf_freeblks */ /* Check agf_freeblks */
static inline void static inline void
xfs_scrub_agf_xref_freeblks( xchk_agf_xref_freeblks(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp); struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
@ -375,16 +375,16 @@ xfs_scrub_agf_xref_freeblks(
return; return;
error = xfs_alloc_query_all(sc->sa.bno_cur, error = xfs_alloc_query_all(sc->sa.bno_cur,
xfs_scrub_agf_record_bno_lengths, &blocks); xchk_agf_record_bno_lengths, &blocks);
if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur)) if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
return; return;
if (blocks != be32_to_cpu(agf->agf_freeblks)) if (blocks != be32_to_cpu(agf->agf_freeblks))
xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp); xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
} }
/* Cross reference the AGF with the cntbt (freespace by length btree) */ /* Cross reference the AGF with the cntbt (freespace by length btree) */
static inline void static inline void
xfs_scrub_agf_xref_cntbt( xchk_agf_xref_cntbt(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp); struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
@ -398,25 +398,25 @@ xfs_scrub_agf_xref_cntbt(
/* Any freespace at all? */ /* Any freespace at all? */
error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have); error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have);
if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur)) if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
return; return;
if (!have) { if (!have) {
if (agf->agf_freeblks != be32_to_cpu(0)) if (agf->agf_freeblks != be32_to_cpu(0))
xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp); xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
return; return;
} }
/* Check agf_longest */ /* Check agf_longest */
error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have); error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have);
if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur)) if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
return; return;
if (!have || blocks != be32_to_cpu(agf->agf_longest)) if (!have || blocks != be32_to_cpu(agf->agf_longest))
xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp); xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
} }
/* Check the btree block counts in the AGF against the btrees. */ /* Check the btree block counts in the AGF against the btrees. */
STATIC void STATIC void
xfs_scrub_agf_xref_btreeblks( xchk_agf_xref_btreeblks(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp); struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
@ -428,11 +428,11 @@ xfs_scrub_agf_xref_btreeblks(
/* Check agf_rmap_blocks; set up for agf_btreeblks check */ /* Check agf_rmap_blocks; set up for agf_btreeblks check */
if (sc->sa.rmap_cur) { if (sc->sa.rmap_cur) {
error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks); error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks);
if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur)) if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
return; return;
btreeblks = blocks - 1; btreeblks = blocks - 1;
if (blocks != be32_to_cpu(agf->agf_rmap_blocks)) if (blocks != be32_to_cpu(agf->agf_rmap_blocks))
xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp); xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
} else { } else {
btreeblks = 0; btreeblks = 0;
} }
@ -447,22 +447,22 @@ xfs_scrub_agf_xref_btreeblks(
/* Check agf_btreeblks */ /* Check agf_btreeblks */
error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks); error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur)) if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
return; return;
btreeblks += blocks - 1; btreeblks += blocks - 1;
error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks); error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur)) if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
return; return;
btreeblks += blocks - 1; btreeblks += blocks - 1;
if (btreeblks != be32_to_cpu(agf->agf_btreeblks)) if (btreeblks != be32_to_cpu(agf->agf_btreeblks))
xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp); xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
} }
/* Check agf_refcount_blocks against tree size */ /* Check agf_refcount_blocks against tree size */
static inline void static inline void
xfs_scrub_agf_xref_refcblks( xchk_agf_xref_refcblks(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp); struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
@ -473,15 +473,15 @@ xfs_scrub_agf_xref_refcblks(
return; return;
error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks); error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks);
if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur)) if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
return; return;
if (blocks != be32_to_cpu(agf->agf_refcount_blocks)) if (blocks != be32_to_cpu(agf->agf_refcount_blocks))
xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp); xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
} }
/* Cross-reference with the other btrees. */ /* Cross-reference with the other btrees. */
STATIC void STATIC void
xfs_scrub_agf_xref( xchk_agf_xref(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
struct xfs_owner_info oinfo; struct xfs_owner_info oinfo;
@ -494,26 +494,26 @@ xfs_scrub_agf_xref(
agbno = XFS_AGF_BLOCK(mp); agbno = XFS_AGF_BLOCK(mp);
error = xfs_scrub_ag_btcur_init(sc, &sc->sa); error = xchk_ag_btcur_init(sc, &sc->sa);
if (error) if (error)
return; return;
xfs_scrub_xref_is_used_space(sc, agbno, 1); xchk_xref_is_used_space(sc, agbno, 1);
xfs_scrub_agf_xref_freeblks(sc); xchk_agf_xref_freeblks(sc);
xfs_scrub_agf_xref_cntbt(sc); xchk_agf_xref_cntbt(sc);
xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1); xchk_xref_is_not_inode_chunk(sc, agbno, 1);
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS); xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo); xchk_xref_is_owned_by(sc, agbno, 1, &oinfo);
xfs_scrub_agf_xref_btreeblks(sc); xchk_agf_xref_btreeblks(sc);
xfs_scrub_xref_is_not_shared(sc, agbno, 1); xchk_xref_is_not_shared(sc, agbno, 1);
xfs_scrub_agf_xref_refcblks(sc); xchk_agf_xref_refcblks(sc);
/* scrub teardown will take care of sc->sa for us */ /* scrub teardown will take care of sc->sa for us */
} }
/* Scrub the AGF. */ /* Scrub the AGF. */
int int
xfs_scrub_agf( xchk_agf(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
struct xfs_mount *mp = sc->mp; struct xfs_mount *mp = sc->mp;
@ -529,54 +529,54 @@ xfs_scrub_agf(
int error = 0; int error = 0;
agno = sc->sa.agno = sc->sm->sm_agno; agno = sc->sa.agno = sc->sm->sm_agno;
error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp, error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
&sc->sa.agf_bp, &sc->sa.agfl_bp); &sc->sa.agf_bp, &sc->sa.agfl_bp);
if (!xfs_scrub_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error)) if (!xchk_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
goto out; goto out;
xfs_scrub_buffer_recheck(sc, sc->sa.agf_bp); xchk_buffer_recheck(sc, sc->sa.agf_bp);
agf = XFS_BUF_TO_AGF(sc->sa.agf_bp); agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
/* Check the AG length */ /* Check the AG length */
eoag = be32_to_cpu(agf->agf_length); eoag = be32_to_cpu(agf->agf_length);
if (eoag != xfs_ag_block_count(mp, agno)) if (eoag != xfs_ag_block_count(mp, agno))
xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp); xchk_block_set_corrupt(sc, sc->sa.agf_bp);
/* Check the AGF btree roots and levels */ /* Check the AGF btree roots and levels */
agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]); agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
if (!xfs_verify_agbno(mp, agno, agbno)) if (!xfs_verify_agbno(mp, agno, agbno))
xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp); xchk_block_set_corrupt(sc, sc->sa.agf_bp);
agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]); agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
if (!xfs_verify_agbno(mp, agno, agbno)) if (!xfs_verify_agbno(mp, agno, agbno))
xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp); xchk_block_set_corrupt(sc, sc->sa.agf_bp);
level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]); level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
if (level <= 0 || level > XFS_BTREE_MAXLEVELS) if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp); xchk_block_set_corrupt(sc, sc->sa.agf_bp);
level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]); level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
if (level <= 0 || level > XFS_BTREE_MAXLEVELS) if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp); xchk_block_set_corrupt(sc, sc->sa.agf_bp);
if (xfs_sb_version_hasrmapbt(&mp->m_sb)) { if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]); agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
if (!xfs_verify_agbno(mp, agno, agbno)) if (!xfs_verify_agbno(mp, agno, agbno))
xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp); xchk_block_set_corrupt(sc, sc->sa.agf_bp);
level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]); level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
if (level <= 0 || level > XFS_BTREE_MAXLEVELS) if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp); xchk_block_set_corrupt(sc, sc->sa.agf_bp);
} }
if (xfs_sb_version_hasreflink(&mp->m_sb)) { if (xfs_sb_version_hasreflink(&mp->m_sb)) {
agbno = be32_to_cpu(agf->agf_refcount_root); agbno = be32_to_cpu(agf->agf_refcount_root);
if (!xfs_verify_agbno(mp, agno, agbno)) if (!xfs_verify_agbno(mp, agno, agbno))
xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp); xchk_block_set_corrupt(sc, sc->sa.agf_bp);
level = be32_to_cpu(agf->agf_refcount_level); level = be32_to_cpu(agf->agf_refcount_level);
if (level <= 0 || level > XFS_BTREE_MAXLEVELS) if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp); xchk_block_set_corrupt(sc, sc->sa.agf_bp);
} }
/* Check the AGFL counters */ /* Check the AGFL counters */
@ -588,16 +588,16 @@ xfs_scrub_agf(
else else
fl_count = xfs_agfl_size(mp) - agfl_first + agfl_last + 1; fl_count = xfs_agfl_size(mp) - agfl_first + agfl_last + 1;
if (agfl_count != 0 && fl_count != agfl_count) if (agfl_count != 0 && fl_count != agfl_count)
xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp); xchk_block_set_corrupt(sc, sc->sa.agf_bp);
xfs_scrub_agf_xref(sc); xchk_agf_xref(sc);
out: out:
return error; return error;
} }
/* AGFL */ /* AGFL */
struct xfs_scrub_agfl_info { struct xchk_agfl_info {
struct xfs_owner_info oinfo; struct xfs_owner_info oinfo;
unsigned int sz_entries; unsigned int sz_entries;
unsigned int nr_entries; unsigned int nr_entries;
@ -607,7 +607,7 @@ struct xfs_scrub_agfl_info {
/* Cross-reference with the other btrees. */ /* Cross-reference with the other btrees. */
STATIC void STATIC void
xfs_scrub_agfl_block_xref( xchk_agfl_block_xref(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_agblock_t agbno, xfs_agblock_t agbno,
struct xfs_owner_info *oinfo) struct xfs_owner_info *oinfo)
@ -615,20 +615,20 @@ xfs_scrub_agfl_block_xref(
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return; return;
xfs_scrub_xref_is_used_space(sc, agbno, 1); xchk_xref_is_used_space(sc, agbno, 1);
xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1); xchk_xref_is_not_inode_chunk(sc, agbno, 1);
xfs_scrub_xref_is_owned_by(sc, agbno, 1, oinfo); xchk_xref_is_owned_by(sc, agbno, 1, oinfo);
xfs_scrub_xref_is_not_shared(sc, agbno, 1); xchk_xref_is_not_shared(sc, agbno, 1);
} }
/* Scrub an AGFL block. */ /* Scrub an AGFL block. */
STATIC int STATIC int
xfs_scrub_agfl_block( xchk_agfl_block(
struct xfs_mount *mp, struct xfs_mount *mp,
xfs_agblock_t agbno, xfs_agblock_t agbno,
void *priv) void *priv)
{ {
struct xfs_scrub_agfl_info *sai = priv; struct xchk_agfl_info *sai = priv;
struct xfs_scrub_context *sc = sai->sc; struct xfs_scrub_context *sc = sai->sc;
xfs_agnumber_t agno = sc->sa.agno; xfs_agnumber_t agno = sc->sa.agno;
@ -636,9 +636,9 @@ xfs_scrub_agfl_block(
sai->nr_entries < sai->sz_entries) sai->nr_entries < sai->sz_entries)
sai->entries[sai->nr_entries++] = agbno; sai->entries[sai->nr_entries++] = agbno;
else else
xfs_scrub_block_set_corrupt(sc, sc->sa.agfl_bp); xchk_block_set_corrupt(sc, sc->sa.agfl_bp);
xfs_scrub_agfl_block_xref(sc, agbno, priv); xchk_agfl_block_xref(sc, agbno, priv);
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return XFS_BTREE_QUERY_RANGE_ABORT; return XFS_BTREE_QUERY_RANGE_ABORT;
@ -647,7 +647,7 @@ xfs_scrub_agfl_block(
} }
static int static int
xfs_scrub_agblock_cmp( xchk_agblock_cmp(
const void *pa, const void *pa,
const void *pb) const void *pb)
{ {
@ -659,7 +659,7 @@ xfs_scrub_agblock_cmp(
/* Cross-reference with the other btrees. */ /* Cross-reference with the other btrees. */
STATIC void STATIC void
xfs_scrub_agfl_xref( xchk_agfl_xref(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
struct xfs_owner_info oinfo; struct xfs_owner_info oinfo;
@ -672,15 +672,15 @@ xfs_scrub_agfl_xref(
agbno = XFS_AGFL_BLOCK(mp); agbno = XFS_AGFL_BLOCK(mp);
error = xfs_scrub_ag_btcur_init(sc, &sc->sa); error = xchk_ag_btcur_init(sc, &sc->sa);
if (error) if (error)
return; return;
xfs_scrub_xref_is_used_space(sc, agbno, 1); xchk_xref_is_used_space(sc, agbno, 1);
xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1); xchk_xref_is_not_inode_chunk(sc, agbno, 1);
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS); xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo); xchk_xref_is_owned_by(sc, agbno, 1, &oinfo);
xfs_scrub_xref_is_not_shared(sc, agbno, 1); xchk_xref_is_not_shared(sc, agbno, 1);
/* /*
* Scrub teardown will take care of sc->sa for us. Leave sc->sa * Scrub teardown will take care of sc->sa for us. Leave sc->sa
@ -690,10 +690,10 @@ xfs_scrub_agfl_xref(
/* Scrub the AGFL. */ /* Scrub the AGFL. */
int int
xfs_scrub_agfl( xchk_agfl(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
struct xfs_scrub_agfl_info sai; struct xchk_agfl_info sai;
struct xfs_agf *agf; struct xfs_agf *agf;
xfs_agnumber_t agno; xfs_agnumber_t agno;
unsigned int agflcount; unsigned int agflcount;
@ -701,15 +701,15 @@ xfs_scrub_agfl(
int error; int error;
agno = sc->sa.agno = sc->sm->sm_agno; agno = sc->sa.agno = sc->sm->sm_agno;
error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp, error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
&sc->sa.agf_bp, &sc->sa.agfl_bp); &sc->sa.agf_bp, &sc->sa.agfl_bp);
if (!xfs_scrub_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error)) if (!xchk_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
goto out; goto out;
if (!sc->sa.agf_bp) if (!sc->sa.agf_bp)
return -EFSCORRUPTED; return -EFSCORRUPTED;
xfs_scrub_buffer_recheck(sc, sc->sa.agfl_bp); xchk_buffer_recheck(sc, sc->sa.agfl_bp);
xfs_scrub_agfl_xref(sc); xchk_agfl_xref(sc);
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out; goto out;
@ -718,7 +718,7 @@ xfs_scrub_agfl(
agf = XFS_BUF_TO_AGF(sc->sa.agf_bp); agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
agflcount = be32_to_cpu(agf->agf_flcount); agflcount = be32_to_cpu(agf->agf_flcount);
if (agflcount > xfs_agfl_size(sc->mp)) { if (agflcount > xfs_agfl_size(sc->mp)) {
xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp); xchk_block_set_corrupt(sc, sc->sa.agf_bp);
goto out; goto out;
} }
memset(&sai, 0, sizeof(sai)); memset(&sai, 0, sizeof(sai));
@ -734,7 +734,7 @@ xfs_scrub_agfl(
/* Check the blocks in the AGFL. */ /* Check the blocks in the AGFL. */
xfs_rmap_ag_owner(&sai.oinfo, XFS_RMAP_OWN_AG); xfs_rmap_ag_owner(&sai.oinfo, XFS_RMAP_OWN_AG);
error = xfs_agfl_walk(sc->mp, XFS_BUF_TO_AGF(sc->sa.agf_bp), error = xfs_agfl_walk(sc->mp, XFS_BUF_TO_AGF(sc->sa.agf_bp),
sc->sa.agfl_bp, xfs_scrub_agfl_block, &sai); sc->sa.agfl_bp, xchk_agfl_block, &sai);
if (error == XFS_BTREE_QUERY_RANGE_ABORT) { if (error == XFS_BTREE_QUERY_RANGE_ABORT) {
error = 0; error = 0;
goto out_free; goto out_free;
@ -743,16 +743,16 @@ xfs_scrub_agfl(
goto out_free; goto out_free;
if (agflcount != sai.nr_entries) { if (agflcount != sai.nr_entries) {
xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp); xchk_block_set_corrupt(sc, sc->sa.agf_bp);
goto out_free; goto out_free;
} }
/* Sort entries, check for duplicates. */ /* Sort entries, check for duplicates. */
sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]), sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]),
xfs_scrub_agblock_cmp, NULL); xchk_agblock_cmp, NULL);
for (i = 1; i < sai.nr_entries; i++) { for (i = 1; i < sai.nr_entries; i++) {
if (sai.entries[i] == sai.entries[i - 1]) { if (sai.entries[i] == sai.entries[i - 1]) {
xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp); xchk_block_set_corrupt(sc, sc->sa.agf_bp);
break; break;
} }
} }
@ -767,7 +767,7 @@ xfs_scrub_agfl(
/* Check agi_count/agi_freecount */ /* Check agi_count/agi_freecount */
static inline void static inline void
xfs_scrub_agi_xref_icounts( xchk_agi_xref_icounts(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
struct xfs_agi *agi = XFS_BUF_TO_AGI(sc->sa.agi_bp); struct xfs_agi *agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);
@ -779,16 +779,16 @@ xfs_scrub_agi_xref_icounts(
return; return;
error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount); error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount);
if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.ino_cur)) if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
return; return;
if (be32_to_cpu(agi->agi_count) != icount || if (be32_to_cpu(agi->agi_count) != icount ||
be32_to_cpu(agi->agi_freecount) != freecount) be32_to_cpu(agi->agi_freecount) != freecount)
xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agi_bp); xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
} }
/* Cross-reference with the other btrees. */ /* Cross-reference with the other btrees. */
STATIC void STATIC void
xfs_scrub_agi_xref( xchk_agi_xref(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
struct xfs_owner_info oinfo; struct xfs_owner_info oinfo;
@ -801,23 +801,23 @@ xfs_scrub_agi_xref(
agbno = XFS_AGI_BLOCK(mp); agbno = XFS_AGI_BLOCK(mp);
error = xfs_scrub_ag_btcur_init(sc, &sc->sa); error = xchk_ag_btcur_init(sc, &sc->sa);
if (error) if (error)
return; return;
xfs_scrub_xref_is_used_space(sc, agbno, 1); xchk_xref_is_used_space(sc, agbno, 1);
xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1); xchk_xref_is_not_inode_chunk(sc, agbno, 1);
xfs_scrub_agi_xref_icounts(sc); xchk_agi_xref_icounts(sc);
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS); xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo); xchk_xref_is_owned_by(sc, agbno, 1, &oinfo);
xfs_scrub_xref_is_not_shared(sc, agbno, 1); xchk_xref_is_not_shared(sc, agbno, 1);
/* scrub teardown will take care of sc->sa for us */ /* scrub teardown will take care of sc->sa for us */
} }
/* Scrub the AGI. */ /* Scrub the AGI. */
int int
xfs_scrub_agi( xchk_agi(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
struct xfs_mount *mp = sc->mp; struct xfs_mount *mp = sc->mp;
@ -834,36 +834,36 @@ xfs_scrub_agi(
int error = 0; int error = 0;
agno = sc->sa.agno = sc->sm->sm_agno; agno = sc->sa.agno = sc->sm->sm_agno;
error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp, error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
&sc->sa.agf_bp, &sc->sa.agfl_bp); &sc->sa.agf_bp, &sc->sa.agfl_bp);
if (!xfs_scrub_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error)) if (!xchk_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
goto out; goto out;
xfs_scrub_buffer_recheck(sc, sc->sa.agi_bp); xchk_buffer_recheck(sc, sc->sa.agi_bp);
agi = XFS_BUF_TO_AGI(sc->sa.agi_bp); agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);
/* Check the AG length */ /* Check the AG length */
eoag = be32_to_cpu(agi->agi_length); eoag = be32_to_cpu(agi->agi_length);
if (eoag != xfs_ag_block_count(mp, agno)) if (eoag != xfs_ag_block_count(mp, agno))
xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp); xchk_block_set_corrupt(sc, sc->sa.agi_bp);
/* Check btree roots and levels */ /* Check btree roots and levels */
agbno = be32_to_cpu(agi->agi_root); agbno = be32_to_cpu(agi->agi_root);
if (!xfs_verify_agbno(mp, agno, agbno)) if (!xfs_verify_agbno(mp, agno, agbno))
xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp); xchk_block_set_corrupt(sc, sc->sa.agi_bp);
level = be32_to_cpu(agi->agi_level); level = be32_to_cpu(agi->agi_level);
if (level <= 0 || level > XFS_BTREE_MAXLEVELS) if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp); xchk_block_set_corrupt(sc, sc->sa.agi_bp);
if (xfs_sb_version_hasfinobt(&mp->m_sb)) { if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
agbno = be32_to_cpu(agi->agi_free_root); agbno = be32_to_cpu(agi->agi_free_root);
if (!xfs_verify_agbno(mp, agno, agbno)) if (!xfs_verify_agbno(mp, agno, agbno))
xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp); xchk_block_set_corrupt(sc, sc->sa.agi_bp);
level = be32_to_cpu(agi->agi_free_level); level = be32_to_cpu(agi->agi_free_level);
if (level <= 0 || level > XFS_BTREE_MAXLEVELS) if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp); xchk_block_set_corrupt(sc, sc->sa.agi_bp);
} }
/* Check inode counters */ /* Check inode counters */
@ -871,16 +871,16 @@ xfs_scrub_agi(
icount = be32_to_cpu(agi->agi_count); icount = be32_to_cpu(agi->agi_count);
if (icount > last_agino - first_agino + 1 || if (icount > last_agino - first_agino + 1 ||
icount < be32_to_cpu(agi->agi_freecount)) icount < be32_to_cpu(agi->agi_freecount))
xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp); xchk_block_set_corrupt(sc, sc->sa.agi_bp);
/* Check inode pointers */ /* Check inode pointers */
agino = be32_to_cpu(agi->agi_newino); agino = be32_to_cpu(agi->agi_newino);
if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino)) if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp); xchk_block_set_corrupt(sc, sc->sa.agi_bp);
agino = be32_to_cpu(agi->agi_dirino); agino = be32_to_cpu(agi->agi_dirino);
if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino)) if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp); xchk_block_set_corrupt(sc, sc->sa.agi_bp);
/* Check unlinked inode buckets */ /* Check unlinked inode buckets */
for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) { for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
@ -888,13 +888,13 @@ xfs_scrub_agi(
if (agino == NULLAGINO) if (agino == NULLAGINO)
continue; continue;
if (!xfs_verify_agino(mp, agno, agino)) if (!xfs_verify_agino(mp, agno, agino))
xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp); xchk_block_set_corrupt(sc, sc->sa.agi_bp);
} }
if (agi->agi_pad32 != cpu_to_be32(0)) if (agi->agi_pad32 != cpu_to_be32(0))
xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp); xchk_block_set_corrupt(sc, sc->sa.agi_bp);
xfs_scrub_agi_xref(sc); xchk_agi_xref(sc);
out: out:
return error; return error;
} }

fs/xfs/scrub/alloc.c

@ -28,11 +28,11 @@
* Set us up to scrub free space btrees. * Set us up to scrub free space btrees.
*/ */
int int
xfs_scrub_setup_ag_allocbt( xchk_setup_ag_allocbt(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_inode *ip) struct xfs_inode *ip)
{ {
return xfs_scrub_setup_ag_btree(sc, ip, false); return xchk_setup_ag_btree(sc, ip, false);
} }
/* Free space btree scrubber. */ /* Free space btree scrubber. */
@ -41,7 +41,7 @@ xfs_scrub_setup_ag_allocbt(
* bnobt/cntbt record, respectively. * bnobt/cntbt record, respectively.
*/ */
STATIC void STATIC void
xfs_scrub_allocbt_xref_other( xchk_allocbt_xref_other(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_agblock_t agbno, xfs_agblock_t agbno,
xfs_extlen_t len) xfs_extlen_t len)
@ -56,32 +56,32 @@ xfs_scrub_allocbt_xref_other(
pcur = &sc->sa.cnt_cur; pcur = &sc->sa.cnt_cur;
else else
pcur = &sc->sa.bno_cur; pcur = &sc->sa.bno_cur;
if (!*pcur || xfs_scrub_skip_xref(sc->sm)) if (!*pcur || xchk_skip_xref(sc->sm))
return; return;
error = xfs_alloc_lookup_le(*pcur, agbno, len, &has_otherrec); error = xfs_alloc_lookup_le(*pcur, agbno, len, &has_otherrec);
if (!xfs_scrub_should_check_xref(sc, &error, pcur)) if (!xchk_should_check_xref(sc, &error, pcur))
return; return;
if (!has_otherrec) { if (!has_otherrec) {
xfs_scrub_btree_xref_set_corrupt(sc, *pcur, 0); xchk_btree_xref_set_corrupt(sc, *pcur, 0);
return; return;
} }
error = xfs_alloc_get_rec(*pcur, &fbno, &flen, &has_otherrec); error = xfs_alloc_get_rec(*pcur, &fbno, &flen, &has_otherrec);
if (!xfs_scrub_should_check_xref(sc, &error, pcur)) if (!xchk_should_check_xref(sc, &error, pcur))
return; return;
if (!has_otherrec) { if (!has_otherrec) {
xfs_scrub_btree_xref_set_corrupt(sc, *pcur, 0); xchk_btree_xref_set_corrupt(sc, *pcur, 0);
return; return;
} }
if (fbno != agbno || flen != len) if (fbno != agbno || flen != len)
xfs_scrub_btree_xref_set_corrupt(sc, *pcur, 0); xchk_btree_xref_set_corrupt(sc, *pcur, 0);
} }
/* Cross-reference with the other btrees. */ /* Cross-reference with the other btrees. */
STATIC void STATIC void
xfs_scrub_allocbt_xref( xchk_allocbt_xref(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_agblock_t agbno, xfs_agblock_t agbno,
xfs_extlen_t len) xfs_extlen_t len)
@ -89,16 +89,16 @@ xfs_scrub_allocbt_xref(
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return; return;
xfs_scrub_allocbt_xref_other(sc, agbno, len); xchk_allocbt_xref_other(sc, agbno, len);
xfs_scrub_xref_is_not_inode_chunk(sc, agbno, len); xchk_xref_is_not_inode_chunk(sc, agbno, len);
xfs_scrub_xref_has_no_owner(sc, agbno, len); xchk_xref_has_no_owner(sc, agbno, len);
xfs_scrub_xref_is_not_shared(sc, agbno, len); xchk_xref_is_not_shared(sc, agbno, len);
} }
/* Scrub a bnobt/cntbt record. */ /* Scrub a bnobt/cntbt record. */
STATIC int STATIC int
xfs_scrub_allocbt_rec( xchk_allocbt_rec(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
union xfs_btree_rec *rec) union xfs_btree_rec *rec)
{ {
struct xfs_mount *mp = bs->cur->bc_mp; struct xfs_mount *mp = bs->cur->bc_mp;
@ -113,16 +113,16 @@ xfs_scrub_allocbt_rec(
if (bno + len <= bno || if (bno + len <= bno ||
!xfs_verify_agbno(mp, agno, bno) || !xfs_verify_agbno(mp, agno, bno) ||
!xfs_verify_agbno(mp, agno, bno + len - 1)) !xfs_verify_agbno(mp, agno, bno + len - 1))
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
xfs_scrub_allocbt_xref(bs->sc, bno, len); xchk_allocbt_xref(bs->sc, bno, len);
return error; return error;
} }
/* Scrub the freespace btrees for some AG. */ /* Scrub the freespace btrees for some AG. */
STATIC int STATIC int
xfs_scrub_allocbt( xchk_allocbt(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_btnum_t which) xfs_btnum_t which)
{ {
@ -131,26 +131,26 @@ xfs_scrub_allocbt(
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG); xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
cur = which == XFS_BTNUM_BNO ? sc->sa.bno_cur : sc->sa.cnt_cur; cur = which == XFS_BTNUM_BNO ? sc->sa.bno_cur : sc->sa.cnt_cur;
return xfs_scrub_btree(sc, cur, xfs_scrub_allocbt_rec, &oinfo, NULL); return xchk_btree(sc, cur, xchk_allocbt_rec, &oinfo, NULL);
} }
int int
xfs_scrub_bnobt( xchk_bnobt(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
return xfs_scrub_allocbt(sc, XFS_BTNUM_BNO); return xchk_allocbt(sc, XFS_BTNUM_BNO);
} }
int int
xfs_scrub_cntbt( xchk_cntbt(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
return xfs_scrub_allocbt(sc, XFS_BTNUM_CNT); return xchk_allocbt(sc, XFS_BTNUM_CNT);
} }
/* xref check that the extent is not free */ /* xref check that the extent is not free */
void void
xfs_scrub_xref_is_used_space( xchk_xref_is_used_space(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_agblock_t agbno, xfs_agblock_t agbno,
xfs_extlen_t len) xfs_extlen_t len)
@ -158,12 +158,12 @@ xfs_scrub_xref_is_used_space(
bool is_freesp; bool is_freesp;
int error; int error;
if (!sc->sa.bno_cur || xfs_scrub_skip_xref(sc->sm)) if (!sc->sa.bno_cur || xchk_skip_xref(sc->sm))
return; return;
error = xfs_alloc_has_record(sc->sa.bno_cur, agbno, len, &is_freesp); error = xfs_alloc_has_record(sc->sa.bno_cur, agbno, len, &is_freesp);
if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur)) if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
return; return;
if (is_freesp) if (is_freesp)
xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.bno_cur, 0); xchk_btree_xref_set_corrupt(sc, sc->sa.bno_cur, 0);
} }

fs/xfs/scrub/attr.c

@ -32,7 +32,7 @@
/* Set us up to scrub an inode's extended attributes. */ /* Set us up to scrub an inode's extended attributes. */
int int
xfs_scrub_setup_xattr( xchk_setup_xattr(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_inode *ip) struct xfs_inode *ip)
{ {
@ -50,12 +50,12 @@ xfs_scrub_setup_xattr(
if (!sc->buf) if (!sc->buf)
return -ENOMEM; return -ENOMEM;
return xfs_scrub_setup_inode_contents(sc, ip, 0); return xchk_setup_inode_contents(sc, ip, 0);
} }
/* Extended Attributes */ /* Extended Attributes */
struct xfs_scrub_xattr { struct xchk_xattr {
struct xfs_attr_list_context context; struct xfs_attr_list_context context;
struct xfs_scrub_context *sc; struct xfs_scrub_context *sc;
}; };
@ -69,22 +69,22 @@ struct xfs_scrub_xattr {
* or if we get more or less data than we expected. * or if we get more or less data than we expected.
*/ */
static void static void
xfs_scrub_xattr_listent( xchk_xattr_listent(
struct xfs_attr_list_context *context, struct xfs_attr_list_context *context,
int flags, int flags,
unsigned char *name, unsigned char *name,
int namelen, int namelen,
int valuelen) int valuelen)
{ {
struct xfs_scrub_xattr *sx; struct xchk_xattr *sx;
struct xfs_da_args args = { NULL }; struct xfs_da_args args = { NULL };
int error = 0; int error = 0;
sx = container_of(context, struct xfs_scrub_xattr, context); sx = container_of(context, struct xchk_xattr, context);
if (flags & XFS_ATTR_INCOMPLETE) { if (flags & XFS_ATTR_INCOMPLETE) {
/* Incomplete attr key, just mark the inode for preening. */ /* Incomplete attr key, just mark the inode for preening. */
xfs_scrub_ino_set_preen(sx->sc, context->dp->i_ino); xchk_ino_set_preen(sx->sc, context->dp->i_ino);
return; return;
} }
@ -106,11 +106,11 @@ xfs_scrub_xattr_listent(
error = xfs_attr_get_ilocked(context->dp, &args); error = xfs_attr_get_ilocked(context->dp, &args);
if (error == -EEXIST) if (error == -EEXIST)
error = 0; error = 0;
if (!xfs_scrub_fblock_process_error(sx->sc, XFS_ATTR_FORK, args.blkno, if (!xchk_fblock_process_error(sx->sc, XFS_ATTR_FORK, args.blkno,
&error)) &error))
goto fail_xref; goto fail_xref;
if (args.valuelen != valuelen) if (args.valuelen != valuelen)
xfs_scrub_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK, xchk_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK,
args.blkno); args.blkno);
fail_xref: fail_xref:
if (sx->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) if (sx->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
@ -126,7 +126,7 @@ xfs_scrub_xattr_listent(
* the smallest address * the smallest address
*/ */
STATIC bool STATIC bool
xfs_scrub_xattr_set_map( xchk_xattr_set_map(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
unsigned long *map, unsigned long *map,
unsigned int start, unsigned int start,
@ -154,7 +154,7 @@ xfs_scrub_xattr_set_map(
* attr freemap has problems or points to used space. * attr freemap has problems or points to used space.
*/ */
STATIC bool STATIC bool
xfs_scrub_xattr_check_freemap( xchk_xattr_check_freemap(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
unsigned long *map, unsigned long *map,
struct xfs_attr3_icleaf_hdr *leafhdr) struct xfs_attr3_icleaf_hdr *leafhdr)
@ -168,7 +168,7 @@ xfs_scrub_xattr_check_freemap(
freemap = (unsigned long *)sc->buf + BITS_TO_LONGS(mapsize); freemap = (unsigned long *)sc->buf + BITS_TO_LONGS(mapsize);
bitmap_zero(freemap, mapsize); bitmap_zero(freemap, mapsize);
for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) { for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
if (!xfs_scrub_xattr_set_map(sc, freemap, if (!xchk_xattr_set_map(sc, freemap,
leafhdr->freemap[i].base, leafhdr->freemap[i].base,
leafhdr->freemap[i].size)) leafhdr->freemap[i].size))
return false; return false;
@ -184,8 +184,8 @@ xfs_scrub_xattr_check_freemap(
* Returns the number of bytes used for the name/value data. * Returns the number of bytes used for the name/value data.
*/ */
STATIC void STATIC void
xfs_scrub_xattr_entry( xchk_xattr_entry(
struct xfs_scrub_da_btree *ds, struct xchk_da_btree *ds,
int level, int level,
char *buf_end, char *buf_end,
struct xfs_attr_leafblock *leaf, struct xfs_attr_leafblock *leaf,
@ -204,17 +204,17 @@ xfs_scrub_xattr_entry(
unsigned int namesize; unsigned int namesize;
if (ent->pad2 != 0) if (ent->pad2 != 0)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
/* Hash values in order? */ /* Hash values in order? */
if (be32_to_cpu(ent->hashval) < *last_hashval) if (be32_to_cpu(ent->hashval) < *last_hashval)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
*last_hashval = be32_to_cpu(ent->hashval); *last_hashval = be32_to_cpu(ent->hashval);
nameidx = be16_to_cpu(ent->nameidx); nameidx = be16_to_cpu(ent->nameidx);
if (nameidx < leafhdr->firstused || if (nameidx < leafhdr->firstused ||
nameidx >= mp->m_attr_geo->blksize) { nameidx >= mp->m_attr_geo->blksize) {
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
return; return;
} }
@ -225,27 +225,27 @@ xfs_scrub_xattr_entry(
be16_to_cpu(lentry->valuelen)); be16_to_cpu(lentry->valuelen));
name_end = (char *)lentry + namesize; name_end = (char *)lentry + namesize;
if (lentry->namelen == 0) if (lentry->namelen == 0)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
} else { } else {
rentry = xfs_attr3_leaf_name_remote(leaf, idx); rentry = xfs_attr3_leaf_name_remote(leaf, idx);
namesize = xfs_attr_leaf_entsize_remote(rentry->namelen); namesize = xfs_attr_leaf_entsize_remote(rentry->namelen);
name_end = (char *)rentry + namesize; name_end = (char *)rentry + namesize;
if (rentry->namelen == 0 || rentry->valueblk == 0) if (rentry->namelen == 0 || rentry->valueblk == 0)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
} }
if (name_end > buf_end) if (name_end > buf_end)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
if (!xfs_scrub_xattr_set_map(ds->sc, usedmap, nameidx, namesize)) if (!xchk_xattr_set_map(ds->sc, usedmap, nameidx, namesize))
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
if (!(ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) if (!(ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
*usedbytes += namesize; *usedbytes += namesize;
} }
/* Scrub an attribute leaf. */ /* Scrub an attribute leaf. */
STATIC int STATIC int
xfs_scrub_xattr_block( xchk_xattr_block(
struct xfs_scrub_da_btree *ds, struct xchk_da_btree *ds,
int level) int level)
{ {
struct xfs_attr3_icleaf_hdr leafhdr; struct xfs_attr3_icleaf_hdr leafhdr;
@ -275,10 +275,10 @@ xfs_scrub_xattr_block(
if (leaf->hdr.pad1 != 0 || leaf->hdr.pad2 != 0 || if (leaf->hdr.pad1 != 0 || leaf->hdr.pad2 != 0 ||
leaf->hdr.info.hdr.pad != 0) leaf->hdr.info.hdr.pad != 0)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
} else { } else {
if (leaf->hdr.pad1 != 0 || leaf->hdr.info.pad != 0) if (leaf->hdr.pad1 != 0 || leaf->hdr.info.pad != 0)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
} }
/* Check the leaf header */ /* Check the leaf header */
@ -286,44 +286,44 @@ xfs_scrub_xattr_block(
hdrsize = xfs_attr3_leaf_hdr_size(leaf); hdrsize = xfs_attr3_leaf_hdr_size(leaf);
if (leafhdr.usedbytes > mp->m_attr_geo->blksize) if (leafhdr.usedbytes > mp->m_attr_geo->blksize)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
if (leafhdr.firstused > mp->m_attr_geo->blksize) if (leafhdr.firstused > mp->m_attr_geo->blksize)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
if (leafhdr.firstused < hdrsize) if (leafhdr.firstused < hdrsize)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
if (!xfs_scrub_xattr_set_map(ds->sc, usedmap, 0, hdrsize)) if (!xchk_xattr_set_map(ds->sc, usedmap, 0, hdrsize))
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out; goto out;
entries = xfs_attr3_leaf_entryp(leaf); entries = xfs_attr3_leaf_entryp(leaf);
if ((char *)&entries[leafhdr.count] > (char *)leaf + leafhdr.firstused) if ((char *)&entries[leafhdr.count] > (char *)leaf + leafhdr.firstused)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
buf_end = (char *)bp->b_addr + mp->m_attr_geo->blksize; buf_end = (char *)bp->b_addr + mp->m_attr_geo->blksize;
for (i = 0, ent = entries; i < leafhdr.count; ent++, i++) { for (i = 0, ent = entries; i < leafhdr.count; ent++, i++) {
/* Mark the leaf entry itself. */ /* Mark the leaf entry itself. */
off = (char *)ent - (char *)leaf; off = (char *)ent - (char *)leaf;
if (!xfs_scrub_xattr_set_map(ds->sc, usedmap, off, if (!xchk_xattr_set_map(ds->sc, usedmap, off,
sizeof(xfs_attr_leaf_entry_t))) { sizeof(xfs_attr_leaf_entry_t))) {
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
goto out; goto out;
} }
/* Check the entry and nameval. */ /* Check the entry and nameval. */
xfs_scrub_xattr_entry(ds, level, buf_end, leaf, &leafhdr, xchk_xattr_entry(ds, level, buf_end, leaf, &leafhdr,
usedmap, ent, i, &usedbytes, &last_hashval); usedmap, ent, i, &usedbytes, &last_hashval);
if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out; goto out;
} }
if (!xfs_scrub_xattr_check_freemap(ds->sc, usedmap, &leafhdr)) if (!xchk_xattr_check_freemap(ds->sc, usedmap, &leafhdr))
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
if (leafhdr.usedbytes != usedbytes) if (leafhdr.usedbytes != usedbytes)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
out: out:
return 0; return 0;
@ -331,8 +331,8 @@ xfs_scrub_xattr_block(
/* Scrub a attribute btree record. */ /* Scrub a attribute btree record. */
STATIC int STATIC int
xfs_scrub_xattr_rec( xchk_xattr_rec(
struct xfs_scrub_da_btree *ds, struct xchk_da_btree *ds,
int level, int level,
void *rec) void *rec)
{ {
@ -352,14 +352,14 @@ xfs_scrub_xattr_rec(
blk = &ds->state->path.blk[level]; blk = &ds->state->path.blk[level];
/* Check the whole block, if necessary. */ /* Check the whole block, if necessary. */
error = xfs_scrub_xattr_block(ds, level); error = xchk_xattr_block(ds, level);
if (error) if (error)
goto out; goto out;
if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out; goto out;
/* Check the hash of the entry. */ /* Check the hash of the entry. */
error = xfs_scrub_da_btree_hash(ds, level, &ent->hashval); error = xchk_da_btree_hash(ds, level, &ent->hashval);
if (error) if (error)
goto out; goto out;
@ -368,7 +368,7 @@ xfs_scrub_xattr_rec(
hdrsize = xfs_attr3_leaf_hdr_size(bp->b_addr); hdrsize = xfs_attr3_leaf_hdr_size(bp->b_addr);
nameidx = be16_to_cpu(ent->nameidx); nameidx = be16_to_cpu(ent->nameidx);
if (nameidx < hdrsize || nameidx >= mp->m_attr_geo->blksize) { if (nameidx < hdrsize || nameidx >= mp->m_attr_geo->blksize) {
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
goto out; goto out;
} }
@ -377,12 +377,12 @@ xfs_scrub_xattr_rec(
badflags = ~(XFS_ATTR_LOCAL | XFS_ATTR_ROOT | XFS_ATTR_SECURE | badflags = ~(XFS_ATTR_LOCAL | XFS_ATTR_ROOT | XFS_ATTR_SECURE |
XFS_ATTR_INCOMPLETE); XFS_ATTR_INCOMPLETE);
if ((ent->flags & badflags) != 0) if ((ent->flags & badflags) != 0)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
if (ent->flags & XFS_ATTR_LOCAL) { if (ent->flags & XFS_ATTR_LOCAL) {
lentry = (struct xfs_attr_leaf_name_local *) lentry = (struct xfs_attr_leaf_name_local *)
(((char *)bp->b_addr) + nameidx); (((char *)bp->b_addr) + nameidx);
if (lentry->namelen <= 0) { if (lentry->namelen <= 0) {
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
goto out; goto out;
} }
calc_hash = xfs_da_hashname(lentry->nameval, lentry->namelen); calc_hash = xfs_da_hashname(lentry->nameval, lentry->namelen);
@ -390,13 +390,13 @@ xfs_scrub_xattr_rec(
rentry = (struct xfs_attr_leaf_name_remote *) rentry = (struct xfs_attr_leaf_name_remote *)
(((char *)bp->b_addr) + nameidx); (((char *)bp->b_addr) + nameidx);
if (rentry->namelen <= 0) { if (rentry->namelen <= 0) {
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
goto out; goto out;
} }
calc_hash = xfs_da_hashname(rentry->name, rentry->namelen); calc_hash = xfs_da_hashname(rentry->name, rentry->namelen);
} }
if (calc_hash != hash) if (calc_hash != hash)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
out: out:
return error; return error;
@ -404,10 +404,10 @@ xfs_scrub_xattr_rec(
/* Scrub the extended attribute metadata. */ /* Scrub the extended attribute metadata. */
int int
xfs_scrub_xattr( xchk_xattr(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
struct xfs_scrub_xattr sx; struct xchk_xattr sx;
struct attrlist_cursor_kern cursor = { 0 }; struct attrlist_cursor_kern cursor = { 0 };
xfs_dablk_t last_checked = -1U; xfs_dablk_t last_checked = -1U;
int error = 0; int error = 0;
@ -417,7 +417,7 @@ xfs_scrub_xattr(
memset(&sx, 0, sizeof(sx)); memset(&sx, 0, sizeof(sx));
/* Check attribute tree structure */ /* Check attribute tree structure */
error = xfs_scrub_da_btree(sc, XFS_ATTR_FORK, xfs_scrub_xattr_rec, error = xchk_da_btree(sc, XFS_ATTR_FORK, xchk_xattr_rec,
&last_checked); &last_checked);
if (error) if (error)
goto out; goto out;
@ -429,7 +429,7 @@ xfs_scrub_xattr(
sx.context.dp = sc->ip; sx.context.dp = sc->ip;
sx.context.cursor = &cursor; sx.context.cursor = &cursor;
sx.context.resynch = 1; sx.context.resynch = 1;
sx.context.put_listent = xfs_scrub_xattr_listent; sx.context.put_listent = xchk_xattr_listent;
sx.context.tp = sc->tp; sx.context.tp = sc->tp;
sx.context.flags = ATTR_INCOMPLETE; sx.context.flags = ATTR_INCOMPLETE;
sx.sc = sc; sx.sc = sc;
@ -438,7 +438,7 @@ xfs_scrub_xattr(
* Look up every xattr in this file by name. * Look up every xattr in this file by name.
* *
* Use the backend implementation of xfs_attr_list to call * Use the backend implementation of xfs_attr_list to call
* xfs_scrub_xattr_listent on every attribute key in this inode. * xchk_xattr_listent on every attribute key in this inode.
* In other words, we use the same iterator/callback mechanism * In other words, we use the same iterator/callback mechanism
* that listattr uses to scrub extended attributes, though in our * that listattr uses to scrub extended attributes, though in our
* _listent function, we check the value of the attribute. * _listent function, we check the value of the attribute.
@ -451,7 +451,7 @@ xfs_scrub_xattr(
* locking order. * locking order.
*/ */
error = xfs_attr_list_int_ilocked(&sx.context); error = xfs_attr_list_int_ilocked(&sx.context);
if (!xfs_scrub_fblock_process_error(sc, XFS_ATTR_FORK, 0, &error)) if (!xchk_fblock_process_error(sc, XFS_ATTR_FORK, 0, &error))
goto out; goto out;
out: out:
return error; return error;
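
The xattr checks above show the calling convention shared by the renamed helpers: a checker records problems with xchk_da_set_corrupt() (or the xchk_fblock_* variants) rather than returning an error, and the caller stops scanning as soon as XFS_SCRUB_OFLAG_CORRUPT appears in sc->sm->sm_flags. A minimal sketch of that shape follows; xchk_example_entry_check() is a hypothetical name used only for illustration and is not part of this patch.

/* Hypothetical per-entry check, shaped like xchk_xattr_entry() above. */
STATIC void
xchk_example_entry_check(
	struct xchk_da_btree	*ds,
	int			level,
	bool			entry_ok)
{
	/* Flag corruption against this da btree level; do not return an error. */
	if (!entry_ok)
		xchk_da_set_corrupt(ds, level);
}

The caller then tests ds->sc->sm->sm_flags for XFS_SCRUB_OFLAG_CORRUPT after each entry and bails out early, exactly as the leaf-entry loop above does.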

fs/xfs/scrub/bmap.c

@ -33,13 +33,13 @@
/* Set us up with an inode's bmap. */ /* Set us up with an inode's bmap. */
int int
xfs_scrub_setup_inode_bmap( xchk_setup_inode_bmap(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_inode *ip) struct xfs_inode *ip)
{ {
int error; int error;
error = xfs_scrub_get_inode(sc, ip); error = xchk_get_inode(sc, ip);
if (error) if (error)
goto out; goto out;
@ -60,7 +60,7 @@ xfs_scrub_setup_inode_bmap(
} }
/* Got the inode, lock it and we're ready to go. */ /* Got the inode, lock it and we're ready to go. */
error = xfs_scrub_trans_alloc(sc, 0); error = xchk_trans_alloc(sc, 0);
if (error) if (error)
goto out; goto out;
sc->ilock_flags |= XFS_ILOCK_EXCL; sc->ilock_flags |= XFS_ILOCK_EXCL;
@ -78,7 +78,7 @@ xfs_scrub_setup_inode_bmap(
* is in btree format. * is in btree format.
*/ */
struct xfs_scrub_bmap_info { struct xchk_bmap_info {
struct xfs_scrub_context *sc; struct xfs_scrub_context *sc;
xfs_fileoff_t lastoff; xfs_fileoff_t lastoff;
bool is_rt; bool is_rt;
@ -88,8 +88,8 @@ struct xfs_scrub_bmap_info {
/* Look for a corresponding rmap for this irec. */ /* Look for a corresponding rmap for this irec. */
static inline bool static inline bool
xfs_scrub_bmap_get_rmap( xchk_bmap_get_rmap(
struct xfs_scrub_bmap_info *info, struct xchk_bmap_info *info,
struct xfs_bmbt_irec *irec, struct xfs_bmbt_irec *irec,
xfs_agblock_t agbno, xfs_agblock_t agbno,
uint64_t owner, uint64_t owner,
@ -120,7 +120,7 @@ xfs_scrub_bmap_get_rmap(
if (info->is_shared) { if (info->is_shared) {
error = xfs_rmap_lookup_le_range(info->sc->sa.rmap_cur, agbno, error = xfs_rmap_lookup_le_range(info->sc->sa.rmap_cur, agbno,
owner, offset, rflags, rmap, &has_rmap); owner, offset, rflags, rmap, &has_rmap);
if (!xfs_scrub_should_check_xref(info->sc, &error, if (!xchk_should_check_xref(info->sc, &error,
&info->sc->sa.rmap_cur)) &info->sc->sa.rmap_cur))
return false; return false;
goto out; goto out;
@ -131,28 +131,28 @@ xfs_scrub_bmap_get_rmap(
*/ */
error = xfs_rmap_lookup_le(info->sc->sa.rmap_cur, agbno, 0, owner, error = xfs_rmap_lookup_le(info->sc->sa.rmap_cur, agbno, 0, owner,
offset, rflags, &has_rmap); offset, rflags, &has_rmap);
if (!xfs_scrub_should_check_xref(info->sc, &error, if (!xchk_should_check_xref(info->sc, &error,
&info->sc->sa.rmap_cur)) &info->sc->sa.rmap_cur))
return false; return false;
if (!has_rmap) if (!has_rmap)
goto out; goto out;
error = xfs_rmap_get_rec(info->sc->sa.rmap_cur, rmap, &has_rmap); error = xfs_rmap_get_rec(info->sc->sa.rmap_cur, rmap, &has_rmap);
if (!xfs_scrub_should_check_xref(info->sc, &error, if (!xchk_should_check_xref(info->sc, &error,
&info->sc->sa.rmap_cur)) &info->sc->sa.rmap_cur))
return false; return false;
out: out:
if (!has_rmap) if (!has_rmap)
xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork, xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
irec->br_startoff); irec->br_startoff);
return has_rmap; return has_rmap;
} }
/* Make sure that we have rmapbt records for this extent. */ /* Make sure that we have rmapbt records for this extent. */
STATIC void STATIC void
xfs_scrub_bmap_xref_rmap( xchk_bmap_xref_rmap(
struct xfs_scrub_bmap_info *info, struct xchk_bmap_info *info,
struct xfs_bmbt_irec *irec, struct xfs_bmbt_irec *irec,
xfs_agblock_t agbno) xfs_agblock_t agbno)
{ {
@ -160,7 +160,7 @@ xfs_scrub_bmap_xref_rmap(
unsigned long long rmap_end; unsigned long long rmap_end;
uint64_t owner; uint64_t owner;
if (!info->sc->sa.rmap_cur || xfs_scrub_skip_xref(info->sc->sm)) if (!info->sc->sa.rmap_cur || xchk_skip_xref(info->sc->sm))
return; return;
if (info->whichfork == XFS_COW_FORK) if (info->whichfork == XFS_COW_FORK)
@ -169,14 +169,14 @@ xfs_scrub_bmap_xref_rmap(
owner = info->sc->ip->i_ino; owner = info->sc->ip->i_ino;
/* Find the rmap record for this irec. */ /* Find the rmap record for this irec. */
if (!xfs_scrub_bmap_get_rmap(info, irec, agbno, owner, &rmap)) if (!xchk_bmap_get_rmap(info, irec, agbno, owner, &rmap))
return; return;
/* Check the rmap. */ /* Check the rmap. */
rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount; rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount;
if (rmap.rm_startblock > agbno || if (rmap.rm_startblock > agbno ||
agbno + irec->br_blockcount > rmap_end) agbno + irec->br_blockcount > rmap_end)
xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork, xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
irec->br_startoff); irec->br_startoff);
/* /*
@ -189,12 +189,12 @@ xfs_scrub_bmap_xref_rmap(
rmap.rm_blockcount; rmap.rm_blockcount;
if (rmap.rm_offset > irec->br_startoff || if (rmap.rm_offset > irec->br_startoff ||
irec->br_startoff + irec->br_blockcount > rmap_end) irec->br_startoff + irec->br_blockcount > rmap_end)
xfs_scrub_fblock_xref_set_corrupt(info->sc, xchk_fblock_xref_set_corrupt(info->sc,
info->whichfork, irec->br_startoff); info->whichfork, irec->br_startoff);
} }
if (rmap.rm_owner != owner) if (rmap.rm_owner != owner)
xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork, xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
irec->br_startoff); irec->br_startoff);
/* /*
@ -207,22 +207,22 @@ xfs_scrub_bmap_xref_rmap(
if (owner != XFS_RMAP_OWN_COW && if (owner != XFS_RMAP_OWN_COW &&
irec->br_state == XFS_EXT_UNWRITTEN && irec->br_state == XFS_EXT_UNWRITTEN &&
!(rmap.rm_flags & XFS_RMAP_UNWRITTEN)) !(rmap.rm_flags & XFS_RMAP_UNWRITTEN))
xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork, xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
irec->br_startoff); irec->br_startoff);
if (info->whichfork == XFS_ATTR_FORK && if (info->whichfork == XFS_ATTR_FORK &&
!(rmap.rm_flags & XFS_RMAP_ATTR_FORK)) !(rmap.rm_flags & XFS_RMAP_ATTR_FORK))
xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork, xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
irec->br_startoff); irec->br_startoff);
if (rmap.rm_flags & XFS_RMAP_BMBT_BLOCK) if (rmap.rm_flags & XFS_RMAP_BMBT_BLOCK)
xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork, xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
irec->br_startoff); irec->br_startoff);
} }
/* Cross-reference a single rtdev extent record. */ /* Cross-reference a single rtdev extent record. */
STATIC void STATIC void
xfs_scrub_bmap_rt_extent_xref( xchk_bmap_rt_extent_xref(
struct xfs_scrub_bmap_info *info, struct xchk_bmap_info *info,
struct xfs_inode *ip, struct xfs_inode *ip,
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
struct xfs_bmbt_irec *irec) struct xfs_bmbt_irec *irec)
@ -230,14 +230,14 @@ xfs_scrub_bmap_rt_extent_xref(
if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return; return;
xfs_scrub_xref_is_used_rt_space(info->sc, irec->br_startblock, xchk_xref_is_used_rt_space(info->sc, irec->br_startblock,
irec->br_blockcount); irec->br_blockcount);
} }
/* Cross-reference a single datadev extent record. */ /* Cross-reference a single datadev extent record. */
STATIC void STATIC void
xfs_scrub_bmap_extent_xref( xchk_bmap_extent_xref(
struct xfs_scrub_bmap_info *info, struct xchk_bmap_info *info,
struct xfs_inode *ip, struct xfs_inode *ip,
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
struct xfs_bmbt_irec *irec) struct xfs_bmbt_irec *irec)
@ -255,38 +255,38 @@ xfs_scrub_bmap_extent_xref(
agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock); agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
len = irec->br_blockcount; len = irec->br_blockcount;
error = xfs_scrub_ag_init(info->sc, agno, &info->sc->sa); error = xchk_ag_init(info->sc, agno, &info->sc->sa);
if (!xfs_scrub_fblock_process_error(info->sc, info->whichfork, if (!xchk_fblock_process_error(info->sc, info->whichfork,
irec->br_startoff, &error)) irec->br_startoff, &error))
return; return;
xfs_scrub_xref_is_used_space(info->sc, agbno, len); xchk_xref_is_used_space(info->sc, agbno, len);
xfs_scrub_xref_is_not_inode_chunk(info->sc, agbno, len); xchk_xref_is_not_inode_chunk(info->sc, agbno, len);
xfs_scrub_bmap_xref_rmap(info, irec, agbno); xchk_bmap_xref_rmap(info, irec, agbno);
switch (info->whichfork) { switch (info->whichfork) {
case XFS_DATA_FORK: case XFS_DATA_FORK:
if (xfs_is_reflink_inode(info->sc->ip)) if (xfs_is_reflink_inode(info->sc->ip))
break; break;
/* fall through */ /* fall through */
case XFS_ATTR_FORK: case XFS_ATTR_FORK:
xfs_scrub_xref_is_not_shared(info->sc, agbno, xchk_xref_is_not_shared(info->sc, agbno,
irec->br_blockcount); irec->br_blockcount);
break; break;
case XFS_COW_FORK: case XFS_COW_FORK:
xfs_scrub_xref_is_cow_staging(info->sc, agbno, xchk_xref_is_cow_staging(info->sc, agbno,
irec->br_blockcount); irec->br_blockcount);
break; break;
} }
xfs_scrub_ag_free(info->sc, &info->sc->sa); xchk_ag_free(info->sc, &info->sc->sa);
} }
/* Scrub a single extent record. */ /* Scrub a single extent record. */
STATIC int STATIC int
xfs_scrub_bmap_extent( xchk_bmap_extent(
struct xfs_inode *ip, struct xfs_inode *ip,
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
struct xfs_scrub_bmap_info *info, struct xchk_bmap_info *info,
struct xfs_bmbt_irec *irec) struct xfs_bmbt_irec *irec)
{ {
struct xfs_mount *mp = info->sc->mp; struct xfs_mount *mp = info->sc->mp;
@ -302,12 +302,12 @@ xfs_scrub_bmap_extent(
* from the incore list, for which there is no ordering check. * from the incore list, for which there is no ordering check.
*/ */
if (irec->br_startoff < info->lastoff) if (irec->br_startoff < info->lastoff)
xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork, xchk_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff); irec->br_startoff);
/* There should never be a "hole" extent in either extent list. */ /* There should never be a "hole" extent in either extent list. */
if (irec->br_startblock == HOLESTARTBLOCK) if (irec->br_startblock == HOLESTARTBLOCK)
xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork, xchk_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff); irec->br_startoff);
/* /*
@ -315,40 +315,40 @@ xfs_scrub_bmap_extent(
* in-core extent scan, and we should never see these in the bmbt. * in-core extent scan, and we should never see these in the bmbt.
*/ */
if (isnullstartblock(irec->br_startblock)) if (isnullstartblock(irec->br_startblock))
xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork, xchk_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff); irec->br_startoff);
/* Make sure the extent points to a valid place. */ /* Make sure the extent points to a valid place. */
if (irec->br_blockcount > MAXEXTLEN) if (irec->br_blockcount > MAXEXTLEN)
xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork, xchk_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff); irec->br_startoff);
if (irec->br_startblock + irec->br_blockcount <= irec->br_startblock) if (irec->br_startblock + irec->br_blockcount <= irec->br_startblock)
xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork, xchk_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff); irec->br_startoff);
end = irec->br_startblock + irec->br_blockcount - 1; end = irec->br_startblock + irec->br_blockcount - 1;
if (info->is_rt && if (info->is_rt &&
(!xfs_verify_rtbno(mp, irec->br_startblock) || (!xfs_verify_rtbno(mp, irec->br_startblock) ||
!xfs_verify_rtbno(mp, end))) !xfs_verify_rtbno(mp, end)))
xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork, xchk_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff); irec->br_startoff);
if (!info->is_rt && if (!info->is_rt &&
(!xfs_verify_fsbno(mp, irec->br_startblock) || (!xfs_verify_fsbno(mp, irec->br_startblock) ||
!xfs_verify_fsbno(mp, end) || !xfs_verify_fsbno(mp, end) ||
XFS_FSB_TO_AGNO(mp, irec->br_startblock) != XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
XFS_FSB_TO_AGNO(mp, end))) XFS_FSB_TO_AGNO(mp, end)))
xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork, xchk_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff); irec->br_startoff);
/* We don't allow unwritten extents on attr forks. */ /* We don't allow unwritten extents on attr forks. */
if (irec->br_state == XFS_EXT_UNWRITTEN && if (irec->br_state == XFS_EXT_UNWRITTEN &&
info->whichfork == XFS_ATTR_FORK) info->whichfork == XFS_ATTR_FORK)
xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork, xchk_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff); irec->br_startoff);
if (info->is_rt) if (info->is_rt)
xfs_scrub_bmap_rt_extent_xref(info, ip, cur, irec); xchk_bmap_rt_extent_xref(info, ip, cur, irec);
else else
xfs_scrub_bmap_extent_xref(info, ip, cur, irec); xchk_bmap_extent_xref(info, ip, cur, irec);
info->lastoff = irec->br_startoff + irec->br_blockcount; info->lastoff = irec->br_startoff + irec->br_blockcount;
return error; return error;
@ -356,12 +356,12 @@ xfs_scrub_bmap_extent(
/* Scrub a bmbt record. */ /* Scrub a bmbt record. */
STATIC int STATIC int
xfs_scrub_bmapbt_rec( xchk_bmapbt_rec(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
union xfs_btree_rec *rec) union xfs_btree_rec *rec)
{ {
struct xfs_bmbt_irec irec; struct xfs_bmbt_irec irec;
struct xfs_scrub_bmap_info *info = bs->private; struct xchk_bmap_info *info = bs->private;
struct xfs_inode *ip = bs->cur->bc_private.b.ip; struct xfs_inode *ip = bs->cur->bc_private.b.ip;
struct xfs_buf *bp = NULL; struct xfs_buf *bp = NULL;
struct xfs_btree_block *block; struct xfs_btree_block *block;
@ -378,22 +378,22 @@ xfs_scrub_bmapbt_rec(
block = xfs_btree_get_block(bs->cur, i, &bp); block = xfs_btree_get_block(bs->cur, i, &bp);
owner = be64_to_cpu(block->bb_u.l.bb_owner); owner = be64_to_cpu(block->bb_u.l.bb_owner);
if (owner != ip->i_ino) if (owner != ip->i_ino)
xfs_scrub_fblock_set_corrupt(bs->sc, xchk_fblock_set_corrupt(bs->sc,
info->whichfork, 0); info->whichfork, 0);
} }
} }
/* Set up the in-core record and scrub it. */ /* Set up the in-core record and scrub it. */
xfs_bmbt_disk_get_all(&rec->bmbt, &irec); xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
return xfs_scrub_bmap_extent(ip, bs->cur, info, &irec); return xchk_bmap_extent(ip, bs->cur, info, &irec);
} }
/* Scan the btree records. */ /* Scan the btree records. */
STATIC int STATIC int
xfs_scrub_bmap_btree( xchk_bmap_btree(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
int whichfork, int whichfork,
struct xfs_scrub_bmap_info *info) struct xchk_bmap_info *info)
{ {
struct xfs_owner_info oinfo; struct xfs_owner_info oinfo;
struct xfs_mount *mp = sc->mp; struct xfs_mount *mp = sc->mp;
@ -403,12 +403,12 @@ xfs_scrub_bmap_btree(
cur = xfs_bmbt_init_cursor(mp, sc->tp, ip, whichfork); cur = xfs_bmbt_init_cursor(mp, sc->tp, ip, whichfork);
xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork); xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
error = xfs_scrub_btree(sc, cur, xfs_scrub_bmapbt_rec, &oinfo, info); error = xchk_btree(sc, cur, xchk_bmapbt_rec, &oinfo, info);
xfs_btree_del_cursor(cur, error); xfs_btree_del_cursor(cur, error);
return error; return error;
} }
struct xfs_scrub_bmap_check_rmap_info { struct xchk_bmap_check_rmap_info {
struct xfs_scrub_context *sc; struct xfs_scrub_context *sc;
int whichfork; int whichfork;
struct xfs_iext_cursor icur; struct xfs_iext_cursor icur;
@ -416,13 +416,13 @@ struct xfs_scrub_bmap_check_rmap_info {
/* Can we find bmaps that fit this rmap? */ /* Can we find bmaps that fit this rmap? */
STATIC int STATIC int
xfs_scrub_bmap_check_rmap( xchk_bmap_check_rmap(
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
struct xfs_rmap_irec *rec, struct xfs_rmap_irec *rec,
void *priv) void *priv)
{ {
struct xfs_bmbt_irec irec; struct xfs_bmbt_irec irec;
struct xfs_scrub_bmap_check_rmap_info *sbcri = priv; struct xchk_bmap_check_rmap_info *sbcri = priv;
struct xfs_ifork *ifp; struct xfs_ifork *ifp;
struct xfs_scrub_context *sc = sbcri->sc; struct xfs_scrub_context *sc = sbcri->sc;
bool have_map; bool have_map;
@ -439,14 +439,14 @@ xfs_scrub_bmap_check_rmap(
/* Now look up the bmbt record. */ /* Now look up the bmbt record. */
ifp = XFS_IFORK_PTR(sc->ip, sbcri->whichfork); ifp = XFS_IFORK_PTR(sc->ip, sbcri->whichfork);
if (!ifp) { if (!ifp) {
xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork, xchk_fblock_set_corrupt(sc, sbcri->whichfork,
rec->rm_offset); rec->rm_offset);
goto out; goto out;
} }
have_map = xfs_iext_lookup_extent(sc->ip, ifp, rec->rm_offset, have_map = xfs_iext_lookup_extent(sc->ip, ifp, rec->rm_offset,
&sbcri->icur, &irec); &sbcri->icur, &irec);
if (!have_map) if (!have_map)
xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork, xchk_fblock_set_corrupt(sc, sbcri->whichfork,
rec->rm_offset); rec->rm_offset);
/* /*
* bmap extent record lengths are constrained to 2^21 blocks in length * bmap extent record lengths are constrained to 2^21 blocks in length
@ -457,14 +457,14 @@ xfs_scrub_bmap_check_rmap(
*/ */
while (have_map) { while (have_map) {
if (irec.br_startoff != rec->rm_offset) if (irec.br_startoff != rec->rm_offset)
xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork, xchk_fblock_set_corrupt(sc, sbcri->whichfork,
rec->rm_offset); rec->rm_offset);
if (irec.br_startblock != XFS_AGB_TO_FSB(sc->mp, if (irec.br_startblock != XFS_AGB_TO_FSB(sc->mp,
cur->bc_private.a.agno, rec->rm_startblock)) cur->bc_private.a.agno, rec->rm_startblock))
xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork, xchk_fblock_set_corrupt(sc, sbcri->whichfork,
rec->rm_offset); rec->rm_offset);
if (irec.br_blockcount > rec->rm_blockcount) if (irec.br_blockcount > rec->rm_blockcount)
xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork, xchk_fblock_set_corrupt(sc, sbcri->whichfork,
rec->rm_offset); rec->rm_offset);
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
break; break;
@ -475,7 +475,7 @@ xfs_scrub_bmap_check_rmap(
break; break;
have_map = xfs_iext_next_extent(ifp, &sbcri->icur, &irec); have_map = xfs_iext_next_extent(ifp, &sbcri->icur, &irec);
if (!have_map) if (!have_map)
xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork, xchk_fblock_set_corrupt(sc, sbcri->whichfork,
rec->rm_offset); rec->rm_offset);
} }
@ -487,12 +487,12 @@ xfs_scrub_bmap_check_rmap(
/* Make sure each rmap has a corresponding bmbt entry. */ /* Make sure each rmap has a corresponding bmbt entry. */
STATIC int STATIC int
xfs_scrub_bmap_check_ag_rmaps( xchk_bmap_check_ag_rmaps(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
int whichfork, int whichfork,
xfs_agnumber_t agno) xfs_agnumber_t agno)
{ {
struct xfs_scrub_bmap_check_rmap_info sbcri; struct xchk_bmap_check_rmap_info sbcri;
struct xfs_btree_cur *cur; struct xfs_btree_cur *cur;
struct xfs_buf *agf; struct xfs_buf *agf;
int error; int error;
@ -509,7 +509,7 @@ xfs_scrub_bmap_check_ag_rmaps(
sbcri.sc = sc; sbcri.sc = sc;
sbcri.whichfork = whichfork; sbcri.whichfork = whichfork;
error = xfs_rmap_query_all(cur, xfs_scrub_bmap_check_rmap, &sbcri); error = xfs_rmap_query_all(cur, xchk_bmap_check_rmap, &sbcri);
if (error == XFS_BTREE_QUERY_RANGE_ABORT) if (error == XFS_BTREE_QUERY_RANGE_ABORT)
error = 0; error = 0;
@ -521,7 +521,7 @@ xfs_scrub_bmap_check_ag_rmaps(
/* Make sure each rmap has a corresponding bmbt entry. */ /* Make sure each rmap has a corresponding bmbt entry. */
STATIC int STATIC int
xfs_scrub_bmap_check_rmaps( xchk_bmap_check_rmaps(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
int whichfork) int whichfork)
{ {
@ -561,7 +561,7 @@ xfs_scrub_bmap_check_rmaps(
return 0; return 0;
for (agno = 0; agno < sc->mp->m_sb.sb_agcount; agno++) { for (agno = 0; agno < sc->mp->m_sb.sb_agcount; agno++) {
error = xfs_scrub_bmap_check_ag_rmaps(sc, whichfork, agno); error = xchk_bmap_check_ag_rmaps(sc, whichfork, agno);
if (error) if (error)
return error; return error;
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
@ -578,12 +578,12 @@ xfs_scrub_bmap_check_rmaps(
* Then we unconditionally scan the incore extent cache. * Then we unconditionally scan the incore extent cache.
*/ */
STATIC int STATIC int
xfs_scrub_bmap( xchk_bmap(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
int whichfork) int whichfork)
{ {
struct xfs_bmbt_irec irec; struct xfs_bmbt_irec irec;
struct xfs_scrub_bmap_info info = { NULL }; struct xchk_bmap_info info = { NULL };
struct xfs_mount *mp = sc->mp; struct xfs_mount *mp = sc->mp;
struct xfs_inode *ip = sc->ip; struct xfs_inode *ip = sc->ip;
struct xfs_ifork *ifp; struct xfs_ifork *ifp;
@ -605,7 +605,7 @@ xfs_scrub_bmap(
goto out; goto out;
/* No CoW forks on non-reflink inodes/filesystems. */ /* No CoW forks on non-reflink inodes/filesystems. */
if (!xfs_is_reflink_inode(ip)) { if (!xfs_is_reflink_inode(ip)) {
xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino); xchk_ino_set_corrupt(sc, sc->ip->i_ino);
goto out; goto out;
} }
break; break;
@ -614,7 +614,7 @@ xfs_scrub_bmap(
goto out_check_rmap; goto out_check_rmap;
if (!xfs_sb_version_hasattr(&mp->m_sb) && if (!xfs_sb_version_hasattr(&mp->m_sb) &&
!xfs_sb_version_hasattr2(&mp->m_sb)) !xfs_sb_version_hasattr2(&mp->m_sb))
xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino); xchk_ino_set_corrupt(sc, sc->ip->i_ino);
break; break;
default: default:
ASSERT(whichfork == XFS_DATA_FORK); ASSERT(whichfork == XFS_DATA_FORK);
@ -630,22 +630,22 @@ xfs_scrub_bmap(
goto out; goto out;
case XFS_DINODE_FMT_EXTENTS: case XFS_DINODE_FMT_EXTENTS:
if (!(ifp->if_flags & XFS_IFEXTENTS)) { if (!(ifp->if_flags & XFS_IFEXTENTS)) {
xfs_scrub_fblock_set_corrupt(sc, whichfork, 0); xchk_fblock_set_corrupt(sc, whichfork, 0);
goto out; goto out;
} }
break; break;
case XFS_DINODE_FMT_BTREE: case XFS_DINODE_FMT_BTREE:
if (whichfork == XFS_COW_FORK) { if (whichfork == XFS_COW_FORK) {
xfs_scrub_fblock_set_corrupt(sc, whichfork, 0); xchk_fblock_set_corrupt(sc, whichfork, 0);
goto out; goto out;
} }
error = xfs_scrub_bmap_btree(sc, whichfork, &info); error = xchk_bmap_btree(sc, whichfork, &info);
if (error) if (error)
goto out; goto out;
break; break;
default: default:
xfs_scrub_fblock_set_corrupt(sc, whichfork, 0); xchk_fblock_set_corrupt(sc, whichfork, 0);
goto out; goto out;
} }
@ -655,37 +655,37 @@ xfs_scrub_bmap(
/* Now try to scrub the in-memory extent list. */ /* Now try to scrub the in-memory extent list. */
if (!(ifp->if_flags & XFS_IFEXTENTS)) { if (!(ifp->if_flags & XFS_IFEXTENTS)) {
error = xfs_iread_extents(sc->tp, ip, whichfork); error = xfs_iread_extents(sc->tp, ip, whichfork);
if (!xfs_scrub_fblock_process_error(sc, whichfork, 0, &error)) if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
goto out; goto out;
} }
/* Find the offset of the last extent in the mapping. */ /* Find the offset of the last extent in the mapping. */
error = xfs_bmap_last_offset(ip, &endoff, whichfork); error = xfs_bmap_last_offset(ip, &endoff, whichfork);
if (!xfs_scrub_fblock_process_error(sc, whichfork, 0, &error)) if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
goto out; goto out;
/* Scrub extent records. */ /* Scrub extent records. */
info.lastoff = 0; info.lastoff = 0;
ifp = XFS_IFORK_PTR(ip, whichfork); ifp = XFS_IFORK_PTR(ip, whichfork);
for_each_xfs_iext(ifp, &icur, &irec) { for_each_xfs_iext(ifp, &icur, &irec) {
if (xfs_scrub_should_terminate(sc, &error) || if (xchk_should_terminate(sc, &error) ||
(sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
break; break;
if (isnullstartblock(irec.br_startblock)) if (isnullstartblock(irec.br_startblock))
continue; continue;
if (irec.br_startoff >= endoff) { if (irec.br_startoff >= endoff) {
xfs_scrub_fblock_set_corrupt(sc, whichfork, xchk_fblock_set_corrupt(sc, whichfork,
irec.br_startoff); irec.br_startoff);
goto out; goto out;
} }
error = xfs_scrub_bmap_extent(ip, NULL, &info, &irec); error = xchk_bmap_extent(ip, NULL, &info, &irec);
if (error) if (error)
goto out; goto out;
} }
out_check_rmap: out_check_rmap:
error = xfs_scrub_bmap_check_rmaps(sc, whichfork); error = xchk_bmap_check_rmaps(sc, whichfork);
if (!xfs_scrub_fblock_xref_process_error(sc, whichfork, 0, &error)) if (!xchk_fblock_xref_process_error(sc, whichfork, 0, &error))
goto out; goto out;
out: out:
return error; return error;
@ -693,27 +693,27 @@ xfs_scrub_bmap(
/* Scrub an inode's data fork. */ /* Scrub an inode's data fork. */
int int
xfs_scrub_bmap_data( xchk_bmap_data(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
return xfs_scrub_bmap(sc, XFS_DATA_FORK); return xchk_bmap(sc, XFS_DATA_FORK);
} }
/* Scrub an inode's attr fork. */ /* Scrub an inode's attr fork. */
int int
xfs_scrub_bmap_attr( xchk_bmap_attr(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
return xfs_scrub_bmap(sc, XFS_ATTR_FORK); return xchk_bmap(sc, XFS_ATTR_FORK);
} }
/* Scrub an inode's CoW fork. */ /* Scrub an inode's CoW fork. */
int int
xfs_scrub_bmap_cow( xchk_bmap_cow(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
if (!xfs_is_reflink_inode(sc->ip)) if (!xfs_is_reflink_inode(sc->ip))
return -ENOENT; return -ENOENT;
return xfs_scrub_bmap(sc, XFS_COW_FORK); return xchk_bmap(sc, XFS_COW_FORK);
} }
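
The extent cross-referencing in this file uses a bracket that recurs throughout scrub: set up the per-AG context with xchk_ag_init(), run the xref predicates, then release the context with xchk_ag_free(). The fragment below is condensed from xchk_bmap_extent_xref() above, with agno, agbno and len assumed to have been derived from the extent record as in that function.

	/* Per-AG cross-reference bracket, condensed from xchk_bmap_extent_xref(). */
	error = xchk_ag_init(info->sc, agno, &info->sc->sa);
	if (!xchk_fblock_process_error(info->sc, info->whichfork,
			irec->br_startoff, &error))
		return;		/* failure already recorded against this fork */

	xchk_xref_is_used_space(info->sc, agbno, len);
	xchk_xref_is_not_inode_chunk(info->sc, agbno, len);
	xchk_bmap_xref_rmap(info, irec, agbno);

	xchk_ag_free(info->sc, &info->sc->sa);

The xchk_xref_* predicates flag cross-referencing problems themselves, so the bracket needs no per-call error handling beyond the initial xchk_fblock_process_error() check.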

fs/xfs/scrub/btree.c

@ -29,7 +29,7 @@
* operational errors in common.c. * operational errors in common.c.
*/ */
static bool static bool
__xfs_scrub_btree_process_error( __xchk_btree_process_error(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
int level, int level,
@ -43,7 +43,7 @@ __xfs_scrub_btree_process_error(
switch (*error) { switch (*error) {
case -EDEADLOCK: case -EDEADLOCK:
/* Used to restart an op with deadlock avoidance. */ /* Used to restart an op with deadlock avoidance. */
trace_xfs_scrub_deadlock_retry(sc->ip, sc->sm, *error); trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
break; break;
case -EFSBADCRC: case -EFSBADCRC:
case -EFSCORRUPTED: case -EFSCORRUPTED:
@ -53,10 +53,10 @@ __xfs_scrub_btree_process_error(
/* fall through */ /* fall through */
default: default:
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
trace_xfs_scrub_ifork_btree_op_error(sc, cur, level, trace_xchk_ifork_btree_op_error(sc, cur, level,
*error, ret_ip); *error, ret_ip);
else else
trace_xfs_scrub_btree_op_error(sc, cur, level, trace_xchk_btree_op_error(sc, cur, level,
*error, ret_ip); *error, ret_ip);
break; break;
} }
@ -64,30 +64,30 @@ __xfs_scrub_btree_process_error(
} }
bool bool
xfs_scrub_btree_process_error( xchk_btree_process_error(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
int level, int level,
int *error) int *error)
{ {
return __xfs_scrub_btree_process_error(sc, cur, level, error, return __xchk_btree_process_error(sc, cur, level, error,
XFS_SCRUB_OFLAG_CORRUPT, __return_address); XFS_SCRUB_OFLAG_CORRUPT, __return_address);
} }
bool bool
xfs_scrub_btree_xref_process_error( xchk_btree_xref_process_error(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
int level, int level,
int *error) int *error)
{ {
return __xfs_scrub_btree_process_error(sc, cur, level, error, return __xchk_btree_process_error(sc, cur, level, error,
XFS_SCRUB_OFLAG_XFAIL, __return_address); XFS_SCRUB_OFLAG_XFAIL, __return_address);
} }
/* Record btree block corruption. */ /* Record btree block corruption. */
static void static void
__xfs_scrub_btree_set_corrupt( __xchk_btree_set_corrupt(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
int level, int level,
@ -97,30 +97,30 @@ __xfs_scrub_btree_set_corrupt(
sc->sm->sm_flags |= errflag; sc->sm->sm_flags |= errflag;
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
trace_xfs_scrub_ifork_btree_error(sc, cur, level, trace_xchk_ifork_btree_error(sc, cur, level,
ret_ip); ret_ip);
else else
trace_xfs_scrub_btree_error(sc, cur, level, trace_xchk_btree_error(sc, cur, level,
ret_ip); ret_ip);
} }
void void
xfs_scrub_btree_set_corrupt( xchk_btree_set_corrupt(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
int level) int level)
{ {
__xfs_scrub_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_CORRUPT, __xchk_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_CORRUPT,
__return_address); __return_address);
} }
void void
xfs_scrub_btree_xref_set_corrupt( xchk_btree_xref_set_corrupt(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
int level) int level)
{ {
__xfs_scrub_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_XCORRUPT, __xchk_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_XCORRUPT,
__return_address); __return_address);
} }
@ -129,8 +129,8 @@ xfs_scrub_btree_xref_set_corrupt(
* keys. * keys.
*/ */
STATIC void STATIC void
xfs_scrub_btree_rec( xchk_btree_rec(
struct xfs_scrub_btree *bs) struct xchk_btree *bs)
{ {
struct xfs_btree_cur *cur = bs->cur; struct xfs_btree_cur *cur = bs->cur;
union xfs_btree_rec *rec; union xfs_btree_rec *rec;
@ -144,11 +144,11 @@ xfs_scrub_btree_rec(
block = xfs_btree_get_block(cur, 0, &bp); block = xfs_btree_get_block(cur, 0, &bp);
rec = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block); rec = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block);
trace_xfs_scrub_btree_rec(bs->sc, cur, 0); trace_xchk_btree_rec(bs->sc, cur, 0);
/* If this isn't the first record, are they in order? */ /* If this isn't the first record, are they in order? */
if (!bs->firstrec && !cur->bc_ops->recs_inorder(cur, &bs->lastrec, rec)) if (!bs->firstrec && !cur->bc_ops->recs_inorder(cur, &bs->lastrec, rec))
xfs_scrub_btree_set_corrupt(bs->sc, cur, 0); xchk_btree_set_corrupt(bs->sc, cur, 0);
bs->firstrec = false; bs->firstrec = false;
memcpy(&bs->lastrec, rec, cur->bc_ops->rec_len); memcpy(&bs->lastrec, rec, cur->bc_ops->rec_len);
@ -160,7 +160,7 @@ xfs_scrub_btree_rec(
keyblock = xfs_btree_get_block(cur, 1, &bp); keyblock = xfs_btree_get_block(cur, 1, &bp);
keyp = xfs_btree_key_addr(cur, cur->bc_ptrs[1], keyblock); keyp = xfs_btree_key_addr(cur, cur->bc_ptrs[1], keyblock);
if (cur->bc_ops->diff_two_keys(cur, &key, keyp) < 0) if (cur->bc_ops->diff_two_keys(cur, &key, keyp) < 0)
xfs_scrub_btree_set_corrupt(bs->sc, cur, 1); xchk_btree_set_corrupt(bs->sc, cur, 1);
if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING)) if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
return; return;
@ -169,7 +169,7 @@ xfs_scrub_btree_rec(
cur->bc_ops->init_high_key_from_rec(&hkey, rec); cur->bc_ops->init_high_key_from_rec(&hkey, rec);
keyp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[1], keyblock); keyp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[1], keyblock);
if (cur->bc_ops->diff_two_keys(cur, keyp, &hkey) < 0) if (cur->bc_ops->diff_two_keys(cur, keyp, &hkey) < 0)
xfs_scrub_btree_set_corrupt(bs->sc, cur, 1); xchk_btree_set_corrupt(bs->sc, cur, 1);
} }
/* /*
@ -177,8 +177,8 @@ xfs_scrub_btree_rec(
* keys. * keys.
*/ */
STATIC void STATIC void
xfs_scrub_btree_key( xchk_btree_key(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
int level) int level)
{ {
struct xfs_btree_cur *cur = bs->cur; struct xfs_btree_cur *cur = bs->cur;
@ -191,12 +191,12 @@ xfs_scrub_btree_key(
block = xfs_btree_get_block(cur, level, &bp); block = xfs_btree_get_block(cur, level, &bp);
key = xfs_btree_key_addr(cur, cur->bc_ptrs[level], block); key = xfs_btree_key_addr(cur, cur->bc_ptrs[level], block);
trace_xfs_scrub_btree_key(bs->sc, cur, level); trace_xchk_btree_key(bs->sc, cur, level);
/* If this isn't the first key, are they in order? */ /* If this isn't the first key, are they in order? */
if (!bs->firstkey[level] && if (!bs->firstkey[level] &&
!cur->bc_ops->keys_inorder(cur, &bs->lastkey[level], key)) !cur->bc_ops->keys_inorder(cur, &bs->lastkey[level], key))
xfs_scrub_btree_set_corrupt(bs->sc, cur, level); xchk_btree_set_corrupt(bs->sc, cur, level);
bs->firstkey[level] = false; bs->firstkey[level] = false;
memcpy(&bs->lastkey[level], key, cur->bc_ops->key_len); memcpy(&bs->lastkey[level], key, cur->bc_ops->key_len);
@ -207,7 +207,7 @@ xfs_scrub_btree_key(
keyblock = xfs_btree_get_block(cur, level + 1, &bp); keyblock = xfs_btree_get_block(cur, level + 1, &bp);
keyp = xfs_btree_key_addr(cur, cur->bc_ptrs[level + 1], keyblock); keyp = xfs_btree_key_addr(cur, cur->bc_ptrs[level + 1], keyblock);
if (cur->bc_ops->diff_two_keys(cur, key, keyp) < 0) if (cur->bc_ops->diff_two_keys(cur, key, keyp) < 0)
xfs_scrub_btree_set_corrupt(bs->sc, cur, level); xchk_btree_set_corrupt(bs->sc, cur, level);
if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING)) if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
return; return;
@ -216,7 +216,7 @@ xfs_scrub_btree_key(
key = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level], block); key = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level], block);
keyp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level + 1], keyblock); keyp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level + 1], keyblock);
if (cur->bc_ops->diff_two_keys(cur, keyp, key) < 0) if (cur->bc_ops->diff_two_keys(cur, keyp, key) < 0)
xfs_scrub_btree_set_corrupt(bs->sc, cur, level); xchk_btree_set_corrupt(bs->sc, cur, level);
} }
/* /*
@ -224,8 +224,8 @@ xfs_scrub_btree_key(
* Callers do not need to set the corrupt flag. * Callers do not need to set the corrupt flag.
*/ */
static bool static bool
xfs_scrub_btree_ptr_ok( xchk_btree_ptr_ok(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
int level, int level,
union xfs_btree_ptr *ptr) union xfs_btree_ptr *ptr)
{ {
@ -242,15 +242,15 @@ xfs_scrub_btree_ptr_ok(
else else
res = xfs_btree_check_sptr(bs->cur, be32_to_cpu(ptr->s), level); res = xfs_btree_check_sptr(bs->cur, be32_to_cpu(ptr->s), level);
if (!res) if (!res)
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, level); xchk_btree_set_corrupt(bs->sc, bs->cur, level);
return res; return res;
} }
/* Check that a btree block's sibling matches what we expect. */ /* Check that a btree block's sibling matches what we expect. */
STATIC int STATIC int
xfs_scrub_btree_block_check_sibling( xchk_btree_block_check_sibling(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
int level, int level,
int direction, int direction,
union xfs_btree_ptr *sibling) union xfs_btree_ptr *sibling)
@ -264,7 +264,7 @@ xfs_scrub_btree_block_check_sibling(
int error; int error;
error = xfs_btree_dup_cursor(cur, &ncur); error = xfs_btree_dup_cursor(cur, &ncur);
if (!xfs_scrub_btree_process_error(bs->sc, cur, level + 1, &error) || if (!xchk_btree_process_error(bs->sc, cur, level + 1, &error) ||
!ncur) !ncur)
return error; return error;
@ -278,7 +278,7 @@ xfs_scrub_btree_block_check_sibling(
else else
error = xfs_btree_decrement(ncur, level + 1, &success); error = xfs_btree_decrement(ncur, level + 1, &success);
if (error == 0 && success) if (error == 0 && success)
xfs_scrub_btree_set_corrupt(bs->sc, cur, level); xchk_btree_set_corrupt(bs->sc, cur, level);
error = 0; error = 0;
goto out; goto out;
} }
@ -288,23 +288,23 @@ xfs_scrub_btree_block_check_sibling(
error = xfs_btree_increment(ncur, level + 1, &success); error = xfs_btree_increment(ncur, level + 1, &success);
else else
error = xfs_btree_decrement(ncur, level + 1, &success); error = xfs_btree_decrement(ncur, level + 1, &success);
if (!xfs_scrub_btree_process_error(bs->sc, cur, level + 1, &error)) if (!xchk_btree_process_error(bs->sc, cur, level + 1, &error))
goto out; goto out;
if (!success) { if (!success) {
xfs_scrub_btree_set_corrupt(bs->sc, cur, level + 1); xchk_btree_set_corrupt(bs->sc, cur, level + 1);
goto out; goto out;
} }
/* Compare upper level pointer to sibling pointer. */ /* Compare upper level pointer to sibling pointer. */
pblock = xfs_btree_get_block(ncur, level + 1, &pbp); pblock = xfs_btree_get_block(ncur, level + 1, &pbp);
pp = xfs_btree_ptr_addr(ncur, ncur->bc_ptrs[level + 1], pblock); pp = xfs_btree_ptr_addr(ncur, ncur->bc_ptrs[level + 1], pblock);
if (!xfs_scrub_btree_ptr_ok(bs, level + 1, pp)) if (!xchk_btree_ptr_ok(bs, level + 1, pp))
goto out; goto out;
if (pbp) if (pbp)
xfs_scrub_buffer_recheck(bs->sc, pbp); xchk_buffer_recheck(bs->sc, pbp);
if (xfs_btree_diff_two_ptrs(cur, pp, sibling)) if (xfs_btree_diff_two_ptrs(cur, pp, sibling))
xfs_scrub_btree_set_corrupt(bs->sc, cur, level); xchk_btree_set_corrupt(bs->sc, cur, level);
out: out:
xfs_btree_del_cursor(ncur, XFS_BTREE_ERROR); xfs_btree_del_cursor(ncur, XFS_BTREE_ERROR);
return error; return error;
@ -312,8 +312,8 @@ xfs_scrub_btree_block_check_sibling(
/* Check the siblings of a btree block. */ /* Check the siblings of a btree block. */
STATIC int STATIC int
xfs_scrub_btree_block_check_siblings( xchk_btree_block_check_siblings(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
struct xfs_btree_block *block) struct xfs_btree_block *block)
{ {
struct xfs_btree_cur *cur = bs->cur; struct xfs_btree_cur *cur = bs->cur;
@ -330,7 +330,7 @@ xfs_scrub_btree_block_check_siblings(
if (level == cur->bc_nlevels - 1) { if (level == cur->bc_nlevels - 1) {
if (!xfs_btree_ptr_is_null(cur, &leftsib) || if (!xfs_btree_ptr_is_null(cur, &leftsib) ||
!xfs_btree_ptr_is_null(cur, &rightsib)) !xfs_btree_ptr_is_null(cur, &rightsib))
xfs_scrub_btree_set_corrupt(bs->sc, cur, level); xchk_btree_set_corrupt(bs->sc, cur, level);
goto out; goto out;
} }
@ -339,10 +339,10 @@ xfs_scrub_btree_block_check_siblings(
* parent level pointers? * parent level pointers?
* (These functions absorb error codes for us.) * (These functions absorb error codes for us.)
*/ */
error = xfs_scrub_btree_block_check_sibling(bs, level, -1, &leftsib); error = xchk_btree_block_check_sibling(bs, level, -1, &leftsib);
if (error) if (error)
return error; return error;
error = xfs_scrub_btree_block_check_sibling(bs, level, 1, &rightsib); error = xchk_btree_block_check_sibling(bs, level, 1, &rightsib);
if (error) if (error)
return error; return error;
out: out:
@ -360,8 +360,8 @@ struct check_owner {
* an rmap record for it. * an rmap record for it.
*/ */
STATIC int STATIC int
xfs_scrub_btree_check_block_owner( xchk_btree_check_block_owner(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
int level, int level,
xfs_daddr_t daddr) xfs_daddr_t daddr)
{ {
@ -380,13 +380,13 @@ xfs_scrub_btree_check_block_owner(
init_sa = bs->cur->bc_flags & XFS_BTREE_LONG_PTRS; init_sa = bs->cur->bc_flags & XFS_BTREE_LONG_PTRS;
if (init_sa) { if (init_sa) {
error = xfs_scrub_ag_init(bs->sc, agno, &bs->sc->sa); error = xchk_ag_init(bs->sc, agno, &bs->sc->sa);
if (!xfs_scrub_btree_xref_process_error(bs->sc, bs->cur, if (!xchk_btree_xref_process_error(bs->sc, bs->cur,
level, &error)) level, &error))
return error; return error;
} }
xfs_scrub_xref_is_used_space(bs->sc, agbno, 1); xchk_xref_is_used_space(bs->sc, agbno, 1);
/* /*
* The bnobt scrubber aliases bs->cur to bs->sc->sa.bno_cur, so we * The bnobt scrubber aliases bs->cur to bs->sc->sa.bno_cur, so we
* have to nullify it (to shut down further block owner checks) if * have to nullify it (to shut down further block owner checks) if
@ -395,20 +395,20 @@ xfs_scrub_btree_check_block_owner(
if (!bs->sc->sa.bno_cur && btnum == XFS_BTNUM_BNO) if (!bs->sc->sa.bno_cur && btnum == XFS_BTNUM_BNO)
bs->cur = NULL; bs->cur = NULL;
xfs_scrub_xref_is_owned_by(bs->sc, agbno, 1, bs->oinfo); xchk_xref_is_owned_by(bs->sc, agbno, 1, bs->oinfo);
if (!bs->sc->sa.rmap_cur && btnum == XFS_BTNUM_RMAP) if (!bs->sc->sa.rmap_cur && btnum == XFS_BTNUM_RMAP)
bs->cur = NULL; bs->cur = NULL;
if (init_sa) if (init_sa)
xfs_scrub_ag_free(bs->sc, &bs->sc->sa); xchk_ag_free(bs->sc, &bs->sc->sa);
return error; return error;
} }
/* Check the owner of a btree block. */ /* Check the owner of a btree block. */
STATIC int STATIC int
xfs_scrub_btree_check_owner( xchk_btree_check_owner(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
int level, int level,
struct xfs_buf *bp) struct xfs_buf *bp)
{ {
@ -437,7 +437,7 @@ xfs_scrub_btree_check_owner(
return 0; return 0;
} }
return xfs_scrub_btree_check_block_owner(bs, level, XFS_BUF_ADDR(bp)); return xchk_btree_check_block_owner(bs, level, XFS_BUF_ADDR(bp));
} }
/* /*
@ -445,8 +445,8 @@ xfs_scrub_btree_check_owner(
* special blocks that don't require that. * special blocks that don't require that.
*/ */
STATIC void STATIC void
xfs_scrub_btree_check_minrecs( xchk_btree_check_minrecs(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
int level, int level,
struct xfs_btree_block *block) struct xfs_btree_block *block)
{ {
@ -475,7 +475,7 @@ xfs_scrub_btree_check_minrecs(
if (level >= ok_level) if (level >= ok_level)
return; return;
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, level); xchk_btree_set_corrupt(bs->sc, bs->cur, level);
} }
/* /*
@ -483,8 +483,8 @@ xfs_scrub_btree_check_minrecs(
* and buffer pointers (if applicable) if they're ok to use. * and buffer pointers (if applicable) if they're ok to use.
*/ */
STATIC int STATIC int
xfs_scrub_btree_get_block( xchk_btree_get_block(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
int level, int level,
union xfs_btree_ptr *pp, union xfs_btree_ptr *pp,
struct xfs_btree_block **pblock, struct xfs_btree_block **pblock,
@ -497,7 +497,7 @@ xfs_scrub_btree_get_block(
*pbp = NULL; *pbp = NULL;
error = xfs_btree_lookup_get_block(bs->cur, level, pp, pblock); error = xfs_btree_lookup_get_block(bs->cur, level, pp, pblock);
if (!xfs_scrub_btree_process_error(bs->sc, bs->cur, level, &error) || if (!xchk_btree_process_error(bs->sc, bs->cur, level, &error) ||
!*pblock) !*pblock)
return error; return error;
@ -509,19 +509,19 @@ xfs_scrub_btree_get_block(
failed_at = __xfs_btree_check_sblock(bs->cur, *pblock, failed_at = __xfs_btree_check_sblock(bs->cur, *pblock,
level, *pbp); level, *pbp);
if (failed_at) { if (failed_at) {
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, level); xchk_btree_set_corrupt(bs->sc, bs->cur, level);
return 0; return 0;
} }
if (*pbp) if (*pbp)
xfs_scrub_buffer_recheck(bs->sc, *pbp); xchk_buffer_recheck(bs->sc, *pbp);
xfs_scrub_btree_check_minrecs(bs, level, *pblock); xchk_btree_check_minrecs(bs, level, *pblock);
/* /*
* Check the block's owner; this function absorbs error codes * Check the block's owner; this function absorbs error codes
* for us. * for us.
*/ */
error = xfs_scrub_btree_check_owner(bs, level, *pbp); error = xchk_btree_check_owner(bs, level, *pbp);
if (error) if (error)
return error; return error;
@ -529,7 +529,7 @@ xfs_scrub_btree_get_block(
* Check the block's siblings; this function absorbs error codes * Check the block's siblings; this function absorbs error codes
* for us. * for us.
*/ */
return xfs_scrub_btree_block_check_siblings(bs, *pblock); return xchk_btree_block_check_siblings(bs, *pblock);
} }
/* /*
@ -537,8 +537,8 @@ xfs_scrub_btree_get_block(
* in the parent block. * in the parent block.
*/ */
STATIC void STATIC void
xfs_scrub_btree_block_keys( xchk_btree_block_keys(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
int level, int level,
struct xfs_btree_block *block) struct xfs_btree_block *block)
{ {
@ -562,7 +562,7 @@ xfs_scrub_btree_block_keys(
parent_block); parent_block);
if (cur->bc_ops->diff_two_keys(cur, &block_keys, parent_keys) != 0) if (cur->bc_ops->diff_two_keys(cur, &block_keys, parent_keys) != 0)
xfs_scrub_btree_set_corrupt(bs->sc, cur, 1); xchk_btree_set_corrupt(bs->sc, cur, 1);
if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING)) if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
return; return;
@ -573,7 +573,7 @@ xfs_scrub_btree_block_keys(
parent_block); parent_block);
if (cur->bc_ops->diff_two_keys(cur, high_bk, high_pk) != 0) if (cur->bc_ops->diff_two_keys(cur, high_bk, high_pk) != 0)
xfs_scrub_btree_set_corrupt(bs->sc, cur, 1); xchk_btree_set_corrupt(bs->sc, cur, 1);
} }
/* /*
@ -582,14 +582,14 @@ xfs_scrub_btree_block_keys(
* so that the caller can verify individual records. * so that the caller can verify individual records.
*/ */
int int
xfs_scrub_btree( xchk_btree(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
xfs_scrub_btree_rec_fn scrub_fn, xchk_btree_rec_fn scrub_fn,
struct xfs_owner_info *oinfo, struct xfs_owner_info *oinfo,
void *private) void *private)
{ {
struct xfs_scrub_btree bs = { NULL }; struct xchk_btree bs = { NULL };
union xfs_btree_ptr ptr; union xfs_btree_ptr ptr;
union xfs_btree_ptr *pp; union xfs_btree_ptr *pp;
union xfs_btree_rec *recp; union xfs_btree_rec *recp;
@ -614,7 +614,7 @@ xfs_scrub_btree(
/* Don't try to check a tree with a height we can't handle. */ /* Don't try to check a tree with a height we can't handle. */
if (cur->bc_nlevels > XFS_BTREE_MAXLEVELS) { if (cur->bc_nlevels > XFS_BTREE_MAXLEVELS) {
xfs_scrub_btree_set_corrupt(sc, cur, 0); xchk_btree_set_corrupt(sc, cur, 0);
goto out; goto out;
} }
@ -624,9 +624,9 @@ xfs_scrub_btree(
*/ */
level = cur->bc_nlevels - 1; level = cur->bc_nlevels - 1;
cur->bc_ops->init_ptr_from_cur(cur, &ptr); cur->bc_ops->init_ptr_from_cur(cur, &ptr);
if (!xfs_scrub_btree_ptr_ok(&bs, cur->bc_nlevels, &ptr)) if (!xchk_btree_ptr_ok(&bs, cur->bc_nlevels, &ptr))
goto out; goto out;
error = xfs_scrub_btree_get_block(&bs, level, &ptr, &block, &bp); error = xchk_btree_get_block(&bs, level, &ptr, &block, &bp);
if (error || !block) if (error || !block)
goto out; goto out;
@ -639,7 +639,7 @@ xfs_scrub_btree(
/* End of leaf, pop back towards the root. */ /* End of leaf, pop back towards the root. */
if (cur->bc_ptrs[level] > if (cur->bc_ptrs[level] >
be16_to_cpu(block->bb_numrecs)) { be16_to_cpu(block->bb_numrecs)) {
xfs_scrub_btree_block_keys(&bs, level, block); xchk_btree_block_keys(&bs, level, block);
if (level < cur->bc_nlevels - 1) if (level < cur->bc_nlevels - 1)
cur->bc_ptrs[level + 1]++; cur->bc_ptrs[level + 1]++;
level++; level++;
@ -647,14 +647,14 @@ xfs_scrub_btree(
} }
/* Records in order for scrub? */ /* Records in order for scrub? */
xfs_scrub_btree_rec(&bs); xchk_btree_rec(&bs);
/* Call out to the record checker. */ /* Call out to the record checker. */
recp = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block); recp = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block);
error = bs.scrub_rec(&bs, recp); error = bs.scrub_rec(&bs, recp);
if (error) if (error)
break; break;
if (xfs_scrub_should_terminate(sc, &error) || if (xchk_should_terminate(sc, &error) ||
(sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
break; break;
@ -664,7 +664,7 @@ xfs_scrub_btree(
/* End of node, pop back towards the root. */ /* End of node, pop back towards the root. */
if (cur->bc_ptrs[level] > be16_to_cpu(block->bb_numrecs)) { if (cur->bc_ptrs[level] > be16_to_cpu(block->bb_numrecs)) {
xfs_scrub_btree_block_keys(&bs, level, block); xchk_btree_block_keys(&bs, level, block);
if (level < cur->bc_nlevels - 1) if (level < cur->bc_nlevels - 1)
cur->bc_ptrs[level + 1]++; cur->bc_ptrs[level + 1]++;
level++; level++;
@ -672,16 +672,16 @@ xfs_scrub_btree(
} }
/* Keys in order for scrub? */ /* Keys in order for scrub? */
xfs_scrub_btree_key(&bs, level); xchk_btree_key(&bs, level);
/* Drill another level deeper. */ /* Drill another level deeper. */
pp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[level], block); pp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[level], block);
if (!xfs_scrub_btree_ptr_ok(&bs, level, pp)) { if (!xchk_btree_ptr_ok(&bs, level, pp)) {
cur->bc_ptrs[level]++; cur->bc_ptrs[level]++;
continue; continue;
} }
level--; level--;
error = xfs_scrub_btree_get_block(&bs, level, pp, &block, &bp); error = xchk_btree_get_block(&bs, level, pp, &block, &bp);
if (error || !block) if (error || !block)
goto out; goto out;
@ -692,7 +692,7 @@ xfs_scrub_btree(
/* Process deferred owner checks on btree blocks. */ /* Process deferred owner checks on btree blocks. */
list_for_each_entry_safe(co, n, &bs.to_check, list) { list_for_each_entry_safe(co, n, &bs.to_check, list) {
if (!error && bs.cur) if (!error && bs.cur)
error = xfs_scrub_btree_check_block_owner(&bs, error = xchk_btree_check_block_owner(&bs,
co->level, co->daddr); co->level, co->daddr);
list_del(&co->list); list_del(&co->list);
kmem_free(co); kmem_free(co);
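
Owner checks that cannot run while the walker holds its own cursor are parked on bs.to_check as struct check_owner entries and replayed by the drain loop in the final hunk above. The queueing side is not visible in this hunk; the sketch below shows how such an entry might be built from the fields the drain loop uses (level, daddr, list), assuming the usual XFS kmem_alloc() helper. It illustrates the deferral pattern only and is not the code added by this patch.

	/* Defer the owner check: remember the block address and btree level. */
	struct check_owner	*co;

	co = kmem_alloc(sizeof(struct check_owner), KM_MAYFAIL);
	if (!co)
		return -ENOMEM;
	co->level = level;
	co->daddr = XFS_BUF_ADDR(bp);
	list_add_tail(&co->list, &bs->to_check);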

fs/xfs/scrub/btree.h

@ -9,32 +9,32 @@
/* btree scrub */ /* btree scrub */
/* Check for btree operation errors. */ /* Check for btree operation errors. */
bool xfs_scrub_btree_process_error(struct xfs_scrub_context *sc, bool xchk_btree_process_error(struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, int level, int *error); struct xfs_btree_cur *cur, int level, int *error);
/* Check for btree xref operation errors. */ /* Check for btree xref operation errors. */
bool xfs_scrub_btree_xref_process_error(struct xfs_scrub_context *sc, bool xchk_btree_xref_process_error(struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, int level, struct xfs_btree_cur *cur, int level,
int *error); int *error);
/* Check for btree corruption. */ /* Check for btree corruption. */
void xfs_scrub_btree_set_corrupt(struct xfs_scrub_context *sc, void xchk_btree_set_corrupt(struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, int level); struct xfs_btree_cur *cur, int level);
/* Check for btree xref discrepancies. */ /* Check for btree xref discrepancies. */
void xfs_scrub_btree_xref_set_corrupt(struct xfs_scrub_context *sc, void xchk_btree_xref_set_corrupt(struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, int level); struct xfs_btree_cur *cur, int level);
struct xfs_scrub_btree; struct xchk_btree;
typedef int (*xfs_scrub_btree_rec_fn)( typedef int (*xchk_btree_rec_fn)(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
union xfs_btree_rec *rec); union xfs_btree_rec *rec);
struct xfs_scrub_btree { struct xchk_btree {
/* caller-provided scrub state */ /* caller-provided scrub state */
struct xfs_scrub_context *sc; struct xfs_scrub_context *sc;
struct xfs_btree_cur *cur; struct xfs_btree_cur *cur;
xfs_scrub_btree_rec_fn scrub_rec; xchk_btree_rec_fn scrub_rec;
struct xfs_owner_info *oinfo; struct xfs_owner_info *oinfo;
void *private; void *private;
@ -45,8 +45,8 @@ struct xfs_scrub_btree {
bool firstkey[XFS_BTREE_MAXLEVELS]; bool firstkey[XFS_BTREE_MAXLEVELS];
struct list_head to_check; struct list_head to_check;
}; };
int xfs_scrub_btree(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur, int xchk_btree(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
xfs_scrub_btree_rec_fn scrub_fn, xchk_btree_rec_fn scrub_fn,
struct xfs_owner_info *oinfo, void *private); struct xfs_owner_info *oinfo, void *private);
#endif /* __XFS_SCRUB_BTREE_H__ */ #endif /* __XFS_SCRUB_BTREE_H__ */
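
This header keeps the driver interface intact under the new names: a caller supplies an xchk_btree_rec_fn and hands it to xchk_btree(), which walks the cursor and calls back once per record, as the bmap scrubber does with xchk_bmapbt_rec earlier in this patch. The callback below is a hypothetical example, not part of this patch; only the xchk_btree() and xchk_btree_set_corrupt() prototypes above are real.

/* Hypothetical record callback matching the xchk_btree_rec_fn typedef. */
STATIC int
xchk_example_rec(
	struct xchk_btree	*bs,
	union xfs_btree_rec	*rec)
{
	bool	ok = true;		/* stand-in for a real record check */

	if (!ok)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	return 0;
}

/* Walking an existing cursor with it; oinfo is assumed to have been set up, */
/* e.g. via xfs_rmap_ino_bmbt_owner() as in the bmap scrubber above. */
error = xchk_btree(sc, cur, xchk_example_rec, &oinfo, NULL);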

fs/xfs/scrub/common.c

@ -68,7 +68,7 @@
/* Check for operational errors. */ /* Check for operational errors. */
static bool static bool
__xfs_scrub_process_error( __xchk_process_error(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_agnumber_t agno, xfs_agnumber_t agno,
xfs_agblock_t bno, xfs_agblock_t bno,
@ -81,7 +81,7 @@ __xfs_scrub_process_error(
return true; return true;
case -EDEADLOCK: case -EDEADLOCK:
/* Used to restart an op with deadlock avoidance. */ /* Used to restart an op with deadlock avoidance. */
trace_xfs_scrub_deadlock_retry(sc->ip, sc->sm, *error); trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
break; break;
case -EFSBADCRC: case -EFSBADCRC:
case -EFSCORRUPTED: case -EFSCORRUPTED:
@ -90,7 +90,7 @@ __xfs_scrub_process_error(
*error = 0; *error = 0;
/* fall through */ /* fall through */
default: default:
trace_xfs_scrub_op_error(sc, agno, bno, *error, trace_xchk_op_error(sc, agno, bno, *error,
ret_ip); ret_ip);
break; break;
} }
@ -98,30 +98,30 @@ __xfs_scrub_process_error(
} }
bool bool
xfs_scrub_process_error( xchk_process_error(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_agnumber_t agno, xfs_agnumber_t agno,
xfs_agblock_t bno, xfs_agblock_t bno,
int *error) int *error)
{ {
return __xfs_scrub_process_error(sc, agno, bno, error, return __xchk_process_error(sc, agno, bno, error,
XFS_SCRUB_OFLAG_CORRUPT, __return_address); XFS_SCRUB_OFLAG_CORRUPT, __return_address);
} }
bool bool
xfs_scrub_xref_process_error( xchk_xref_process_error(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_agnumber_t agno, xfs_agnumber_t agno,
xfs_agblock_t bno, xfs_agblock_t bno,
int *error) int *error)
{ {
return __xfs_scrub_process_error(sc, agno, bno, error, return __xchk_process_error(sc, agno, bno, error,
XFS_SCRUB_OFLAG_XFAIL, __return_address); XFS_SCRUB_OFLAG_XFAIL, __return_address);
} }
/* Check for operational errors for a file offset. */ /* Check for operational errors for a file offset. */
static bool static bool
__xfs_scrub_fblock_process_error( __xchk_fblock_process_error(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
int whichfork, int whichfork,
xfs_fileoff_t offset, xfs_fileoff_t offset,
@ -134,7 +134,7 @@ __xfs_scrub_fblock_process_error(
return true; return true;
case -EDEADLOCK: case -EDEADLOCK:
/* Used to restart an op with deadlock avoidance. */ /* Used to restart an op with deadlock avoidance. */
trace_xfs_scrub_deadlock_retry(sc->ip, sc->sm, *error); trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
break; break;
case -EFSBADCRC: case -EFSBADCRC:
case -EFSCORRUPTED: case -EFSCORRUPTED:
@ -143,7 +143,7 @@ __xfs_scrub_fblock_process_error(
*error = 0; *error = 0;
/* fall through */ /* fall through */
default: default:
trace_xfs_scrub_file_op_error(sc, whichfork, offset, *error, trace_xchk_file_op_error(sc, whichfork, offset, *error,
ret_ip); ret_ip);
break; break;
} }
@ -151,24 +151,24 @@ __xfs_scrub_fblock_process_error(
} }
bool bool
xfs_scrub_fblock_process_error( xchk_fblock_process_error(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
int whichfork, int whichfork,
xfs_fileoff_t offset, xfs_fileoff_t offset,
int *error) int *error)
{ {
return __xfs_scrub_fblock_process_error(sc, whichfork, offset, error, return __xchk_fblock_process_error(sc, whichfork, offset, error,
XFS_SCRUB_OFLAG_CORRUPT, __return_address); XFS_SCRUB_OFLAG_CORRUPT, __return_address);
} }
bool bool
xfs_scrub_fblock_xref_process_error( xchk_fblock_xref_process_error(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
int whichfork, int whichfork,
xfs_fileoff_t offset, xfs_fileoff_t offset,
int *error) int *error)
{ {
return __xfs_scrub_fblock_process_error(sc, whichfork, offset, error, return __xchk_fblock_process_error(sc, whichfork, offset, error,
XFS_SCRUB_OFLAG_XFAIL, __return_address); XFS_SCRUB_OFLAG_XFAIL, __return_address);
} }
@ -186,12 +186,12 @@ xfs_scrub_fblock_xref_process_error(
/* Record a block which could be optimized. */ /* Record a block which could be optimized. */
void void
xfs_scrub_block_set_preen( xchk_block_set_preen(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_buf *bp) struct xfs_buf *bp)
{ {
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN; sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
trace_xfs_scrub_block_preen(sc, bp->b_bn, __return_address); trace_xchk_block_preen(sc, bp->b_bn, __return_address);
} }
/* /*
@ -200,32 +200,32 @@ xfs_scrub_block_set_preen(
* the block location of the inode record itself. * the block location of the inode record itself.
*/ */
void void
xfs_scrub_ino_set_preen( xchk_ino_set_preen(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_ino_t ino) xfs_ino_t ino)
{ {
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN; sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
trace_xfs_scrub_ino_preen(sc, ino, __return_address); trace_xchk_ino_preen(sc, ino, __return_address);
} }
/* Record a corrupt block. */ /* Record a corrupt block. */
void void
xfs_scrub_block_set_corrupt( xchk_block_set_corrupt(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_buf *bp) struct xfs_buf *bp)
{ {
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT; sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
trace_xfs_scrub_block_error(sc, bp->b_bn, __return_address); trace_xchk_block_error(sc, bp->b_bn, __return_address);
} }
/* Record a corruption while cross-referencing. */ /* Record a corruption while cross-referencing. */
void void
xfs_scrub_block_xref_set_corrupt( xchk_block_xref_set_corrupt(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_buf *bp) struct xfs_buf *bp)
{ {
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT; sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
trace_xfs_scrub_block_error(sc, bp->b_bn, __return_address); trace_xchk_block_error(sc, bp->b_bn, __return_address);
} }
/* /*
@ -234,44 +234,44 @@ xfs_scrub_block_xref_set_corrupt(
* inode record itself. * inode record itself.
*/ */
void void
xfs_scrub_ino_set_corrupt( xchk_ino_set_corrupt(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_ino_t ino) xfs_ino_t ino)
{ {
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT; sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
trace_xfs_scrub_ino_error(sc, ino, __return_address); trace_xchk_ino_error(sc, ino, __return_address);
} }
/* Record a corruption while cross-referencing with an inode. */ /* Record a corruption while cross-referencing with an inode. */
void void
xfs_scrub_ino_xref_set_corrupt( xchk_ino_xref_set_corrupt(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_ino_t ino) xfs_ino_t ino)
{ {
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT; sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
trace_xfs_scrub_ino_error(sc, ino, __return_address); trace_xchk_ino_error(sc, ino, __return_address);
} }
/* Record corruption in a block indexed by a file fork. */ /* Record corruption in a block indexed by a file fork. */
void void
xfs_scrub_fblock_set_corrupt( xchk_fblock_set_corrupt(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
int whichfork, int whichfork,
xfs_fileoff_t offset) xfs_fileoff_t offset)
{ {
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT; sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
trace_xfs_scrub_fblock_error(sc, whichfork, offset, __return_address); trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
} }
/* Record a corruption while cross-referencing a fork block. */ /* Record a corruption while cross-referencing a fork block. */
void void
xfs_scrub_fblock_xref_set_corrupt( xchk_fblock_xref_set_corrupt(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
int whichfork, int whichfork,
xfs_fileoff_t offset) xfs_fileoff_t offset)
{ {
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT; sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
trace_xfs_scrub_fblock_error(sc, whichfork, offset, __return_address); trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
} }
/* /*
@ -279,32 +279,32 @@ xfs_scrub_fblock_xref_set_corrupt(
* incorrect. * incorrect.
*/ */
void void
xfs_scrub_ino_set_warning( xchk_ino_set_warning(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_ino_t ino) xfs_ino_t ino)
{ {
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING; sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
trace_xfs_scrub_ino_warning(sc, ino, __return_address); trace_xchk_ino_warning(sc, ino, __return_address);
} }
/* Warn about a block indexed by a file fork that needs review. */ /* Warn about a block indexed by a file fork that needs review. */
void void
xfs_scrub_fblock_set_warning( xchk_fblock_set_warning(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
int whichfork, int whichfork,
xfs_fileoff_t offset) xfs_fileoff_t offset)
{ {
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING; sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
trace_xfs_scrub_fblock_warning(sc, whichfork, offset, __return_address); trace_xchk_fblock_warning(sc, whichfork, offset, __return_address);
} }
/* Signal an incomplete scrub. */ /* Signal an incomplete scrub. */
void void
xfs_scrub_set_incomplete( xchk_set_incomplete(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_INCOMPLETE; sc->sm->sm_flags |= XFS_SCRUB_OFLAG_INCOMPLETE;
trace_xfs_scrub_incomplete(sc, __return_address); trace_xchk_incomplete(sc, __return_address);
} }
/* /*
@ -312,18 +312,18 @@ xfs_scrub_set_incomplete(
* at least according to the reverse mapping data. * at least according to the reverse mapping data.
*/ */
struct xfs_scrub_rmap_ownedby_info { struct xchk_rmap_ownedby_info {
struct xfs_owner_info *oinfo; struct xfs_owner_info *oinfo;
xfs_filblks_t *blocks; xfs_filblks_t *blocks;
}; };
STATIC int STATIC int
xfs_scrub_count_rmap_ownedby_irec( xchk_count_rmap_ownedby_irec(
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
struct xfs_rmap_irec *rec, struct xfs_rmap_irec *rec,
void *priv) void *priv)
{ {
struct xfs_scrub_rmap_ownedby_info *sroi = priv; struct xchk_rmap_ownedby_info *sroi = priv;
bool irec_attr; bool irec_attr;
bool oinfo_attr; bool oinfo_attr;
@ -344,19 +344,19 @@ xfs_scrub_count_rmap_ownedby_irec(
* The caller should pass us an rmapbt cursor. * The caller should pass us an rmapbt cursor.
*/ */
int int
xfs_scrub_count_rmap_ownedby_ag( xchk_count_rmap_ownedby_ag(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
struct xfs_owner_info *oinfo, struct xfs_owner_info *oinfo,
xfs_filblks_t *blocks) xfs_filblks_t *blocks)
{ {
struct xfs_scrub_rmap_ownedby_info sroi; struct xchk_rmap_ownedby_info sroi;
sroi.oinfo = oinfo; sroi.oinfo = oinfo;
*blocks = 0; *blocks = 0;
sroi.blocks = blocks; sroi.blocks = blocks;
return xfs_rmap_query_all(cur, xfs_scrub_count_rmap_ownedby_irec, return xfs_rmap_query_all(cur, xchk_count_rmap_ownedby_irec,
&sroi); &sroi);
} }
@ -392,12 +392,12 @@ want_ag_read_header_failure(
/* /*
* Grab all the headers for an AG. * Grab all the headers for an AG.
* *
* The headers should be released by xfs_scrub_ag_free, but as a fail * The headers should be released by xchk_ag_free, but as a fail
* safe we attach all the buffers we grab to the scrub transaction so * safe we attach all the buffers we grab to the scrub transaction so
* they'll all be freed when we cancel it. * they'll all be freed when we cancel it.
*/ */
int int
xfs_scrub_ag_read_headers( xchk_ag_read_headers(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_agnumber_t agno, xfs_agnumber_t agno,
struct xfs_buf **agi, struct xfs_buf **agi,
@ -425,8 +425,8 @@ xfs_scrub_ag_read_headers(
/* Release all the AG btree cursors. */ /* Release all the AG btree cursors. */
void void
xfs_scrub_ag_btcur_free( xchk_ag_btcur_free(
struct xfs_scrub_ag *sa) struct xchk_ag *sa)
{ {
if (sa->refc_cur) if (sa->refc_cur)
xfs_btree_del_cursor(sa->refc_cur, XFS_BTREE_ERROR); xfs_btree_del_cursor(sa->refc_cur, XFS_BTREE_ERROR);
@ -451,9 +451,9 @@ xfs_scrub_ag_btcur_free(
/* Initialize all the btree cursors for an AG. */ /* Initialize all the btree cursors for an AG. */
int int
xfs_scrub_ag_btcur_init( xchk_ag_btcur_init(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_scrub_ag *sa) struct xchk_ag *sa)
{ {
struct xfs_mount *mp = sc->mp; struct xfs_mount *mp = sc->mp;
xfs_agnumber_t agno = sa->agno; xfs_agnumber_t agno = sa->agno;
@ -511,11 +511,11 @@ xfs_scrub_ag_btcur_init(
/* Release the AG header context and btree cursors. */ /* Release the AG header context and btree cursors. */
void void
xfs_scrub_ag_free( xchk_ag_free(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_scrub_ag *sa) struct xchk_ag *sa)
{ {
xfs_scrub_ag_btcur_free(sa); xchk_ag_btcur_free(sa);
if (sa->agfl_bp) { if (sa->agfl_bp) {
xfs_trans_brelse(sc->tp, sa->agfl_bp); xfs_trans_brelse(sc->tp, sa->agfl_bp);
sa->agfl_bp = NULL; sa->agfl_bp = NULL;
@ -543,30 +543,30 @@ xfs_scrub_ag_free(
* transaction ourselves. * transaction ourselves.
*/ */
int int
xfs_scrub_ag_init( xchk_ag_init(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_agnumber_t agno, xfs_agnumber_t agno,
struct xfs_scrub_ag *sa) struct xchk_ag *sa)
{ {
int error; int error;
sa->agno = agno; sa->agno = agno;
error = xfs_scrub_ag_read_headers(sc, agno, &sa->agi_bp, error = xchk_ag_read_headers(sc, agno, &sa->agi_bp,
&sa->agf_bp, &sa->agfl_bp); &sa->agf_bp, &sa->agfl_bp);
if (error) if (error)
return error; return error;
return xfs_scrub_ag_btcur_init(sc, sa); return xchk_ag_btcur_init(sc, sa);
} }
/* /*
* Grab the per-ag structure if we haven't already gotten it. Teardown of the * Grab the per-ag structure if we haven't already gotten it. Teardown of the
* xfs_scrub_ag will release it for us. * xchk_ag will release it for us.
*/ */
void void
xfs_scrub_perag_get( xchk_perag_get(
struct xfs_mount *mp, struct xfs_mount *mp,
struct xfs_scrub_ag *sa) struct xchk_ag *sa)
{ {
if (!sa->pag) if (!sa->pag)
sa->pag = xfs_perag_get(mp, sa->agno); sa->pag = xfs_perag_get(mp, sa->agno);
@ -585,7 +585,7 @@ xfs_scrub_perag_get(
* the metadata object. * the metadata object.
*/ */
int int
xfs_scrub_trans_alloc( xchk_trans_alloc(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
uint resblks) uint resblks)
{ {
@ -598,19 +598,19 @@ xfs_scrub_trans_alloc(
/* Set us up with a transaction and an empty context. */ /* Set us up with a transaction and an empty context. */
int int
xfs_scrub_setup_fs( xchk_setup_fs(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_inode *ip) struct xfs_inode *ip)
{ {
uint resblks; uint resblks;
resblks = xfs_repair_calc_ag_resblks(sc); resblks = xfs_repair_calc_ag_resblks(sc);
return xfs_scrub_trans_alloc(sc, resblks); return xchk_trans_alloc(sc, resblks);
} }
/* Set us up with AG headers and btree cursors. */ /* Set us up with AG headers and btree cursors. */
int int
xfs_scrub_setup_ag_btree( xchk_setup_ag_btree(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_inode *ip, struct xfs_inode *ip,
bool force_log) bool force_log)
@ -625,21 +625,21 @@ xfs_scrub_setup_ag_btree(
* document why they need to do so. * document why they need to do so.
*/ */
if (force_log) { if (force_log) {
error = xfs_scrub_checkpoint_log(mp); error = xchk_checkpoint_log(mp);
if (error) if (error)
return error; return error;
} }
error = xfs_scrub_setup_fs(sc, ip); error = xchk_setup_fs(sc, ip);
if (error) if (error)
return error; return error;
return xfs_scrub_ag_init(sc, sc->sm->sm_agno, &sc->sa); return xchk_ag_init(sc, sc->sm->sm_agno, &sc->sa);
} }
/* Push everything out of the log onto disk. */ /* Push everything out of the log onto disk. */
int int
xfs_scrub_checkpoint_log( xchk_checkpoint_log(
struct xfs_mount *mp) struct xfs_mount *mp)
{ {
int error; int error;
@ -657,7 +657,7 @@ xfs_scrub_checkpoint_log(
* The inode is not locked. * The inode is not locked.
*/ */
int int
xfs_scrub_get_inode( xchk_get_inode(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_inode *ip_in) struct xfs_inode *ip_in)
{ {
@ -704,7 +704,7 @@ xfs_scrub_get_inode(
error = -EFSCORRUPTED; error = -EFSCORRUPTED;
/* fall through */ /* fall through */
default: default:
trace_xfs_scrub_op_error(sc, trace_xchk_op_error(sc,
XFS_INO_TO_AGNO(mp, sc->sm->sm_ino), XFS_INO_TO_AGNO(mp, sc->sm->sm_ino),
XFS_INO_TO_AGBNO(mp, sc->sm->sm_ino), XFS_INO_TO_AGBNO(mp, sc->sm->sm_ino),
error, __return_address); error, __return_address);
@ -721,21 +721,21 @@ xfs_scrub_get_inode(
/* Set us up to scrub a file's contents. */ /* Set us up to scrub a file's contents. */
int int
xfs_scrub_setup_inode_contents( xchk_setup_inode_contents(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_inode *ip, struct xfs_inode *ip,
unsigned int resblks) unsigned int resblks)
{ {
int error; int error;
error = xfs_scrub_get_inode(sc, ip); error = xchk_get_inode(sc, ip);
if (error) if (error)
return error; return error;
/* Got the inode, lock it and we're ready to go. */ /* Got the inode, lock it and we're ready to go. */
sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL; sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
xfs_ilock(sc->ip, sc->ilock_flags); xfs_ilock(sc->ip, sc->ilock_flags);
error = xfs_scrub_trans_alloc(sc, resblks); error = xchk_trans_alloc(sc, resblks);
if (error) if (error)
goto out; goto out;
sc->ilock_flags |= XFS_ILOCK_EXCL; sc->ilock_flags |= XFS_ILOCK_EXCL;
@ -752,13 +752,13 @@ xfs_scrub_setup_inode_contents(
* the cursor and skip the check. * the cursor and skip the check.
*/ */
bool bool
xfs_scrub_should_check_xref( xchk_should_check_xref(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
int *error, int *error,
struct xfs_btree_cur **curpp) struct xfs_btree_cur **curpp)
{ {
/* No point in xref if we already know we're corrupt. */ /* No point in xref if we already know we're corrupt. */
if (xfs_scrub_skip_xref(sc->sm)) if (xchk_skip_xref(sc->sm))
return false; return false;
if (*error == 0) if (*error == 0)
@ -775,7 +775,7 @@ xfs_scrub_should_check_xref(
} }
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XFAIL; sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XFAIL;
trace_xfs_scrub_xref_error(sc, *error, __return_address); trace_xchk_xref_error(sc, *error, __return_address);
/* /*
* Errors encountered during cross-referencing with another * Errors encountered during cross-referencing with another
@ -787,25 +787,25 @@ xfs_scrub_should_check_xref(
/* Run the structure verifiers on in-memory buffers to detect bad memory. */ /* Run the structure verifiers on in-memory buffers to detect bad memory. */
void void
xfs_scrub_buffer_recheck( xchk_buffer_recheck(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_buf *bp) struct xfs_buf *bp)
{ {
xfs_failaddr_t fa; xfs_failaddr_t fa;
if (bp->b_ops == NULL) { if (bp->b_ops == NULL) {
xfs_scrub_block_set_corrupt(sc, bp); xchk_block_set_corrupt(sc, bp);
return; return;
} }
if (bp->b_ops->verify_struct == NULL) { if (bp->b_ops->verify_struct == NULL) {
xfs_scrub_set_incomplete(sc); xchk_set_incomplete(sc);
return; return;
} }
fa = bp->b_ops->verify_struct(bp); fa = bp->b_ops->verify_struct(bp);
if (!fa) if (!fa)
return; return;
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT; sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
trace_xfs_scrub_block_error(sc, bp->b_bn, fa); trace_xchk_block_error(sc, bp->b_bn, fa);
} }
/* /*
@ -813,7 +813,7 @@ xfs_scrub_buffer_recheck(
* pointed to by sc->ip and the ILOCK must be held. * pointed to by sc->ip and the ILOCK must be held.
*/ */
int int
xfs_scrub_metadata_inode_forks( xchk_metadata_inode_forks(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
__u32 smtype; __u32 smtype;
@ -825,26 +825,26 @@ xfs_scrub_metadata_inode_forks(
/* Metadata inodes don't live on the rt device. */ /* Metadata inodes don't live on the rt device. */
if (sc->ip->i_d.di_flags & XFS_DIFLAG_REALTIME) { if (sc->ip->i_d.di_flags & XFS_DIFLAG_REALTIME) {
xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino); xchk_ino_set_corrupt(sc, sc->ip->i_ino);
return 0; return 0;
} }
/* They should never participate in reflink. */ /* They should never participate in reflink. */
if (xfs_is_reflink_inode(sc->ip)) { if (xfs_is_reflink_inode(sc->ip)) {
xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino); xchk_ino_set_corrupt(sc, sc->ip->i_ino);
return 0; return 0;
} }
/* They also should never have extended attributes. */ /* They also should never have extended attributes. */
if (xfs_inode_hasattr(sc->ip)) { if (xfs_inode_hasattr(sc->ip)) {
xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino); xchk_ino_set_corrupt(sc, sc->ip->i_ino);
return 0; return 0;
} }
/* Invoke the data fork scrubber. */ /* Invoke the data fork scrubber. */
smtype = sc->sm->sm_type; smtype = sc->sm->sm_type;
sc->sm->sm_type = XFS_SCRUB_TYPE_BMBTD; sc->sm->sm_type = XFS_SCRUB_TYPE_BMBTD;
error = xfs_scrub_bmap_data(sc); error = xchk_bmap_data(sc);
sc->sm->sm_type = smtype; sc->sm->sm_type = smtype;
if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
return error; return error;
@ -853,11 +853,11 @@ xfs_scrub_metadata_inode_forks(
if (xfs_sb_version_hasreflink(&sc->mp->m_sb)) { if (xfs_sb_version_hasreflink(&sc->mp->m_sb)) {
error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip, error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip,
&shared); &shared);
if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0,
&error)) &error))
return error; return error;
if (shared) if (shared)
xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino); xchk_ino_set_corrupt(sc, sc->ip->i_ino);
} }
return error; return error;
@ -871,7 +871,7 @@ xfs_scrub_metadata_inode_forks(
* we can't. * we can't.
*/ */
int int
xfs_scrub_ilock_inverted( xchk_ilock_inverted(
struct xfs_inode *ip, struct xfs_inode *ip,
uint lock_mode) uint lock_mode)
{ {

View File

@ -12,7 +12,7 @@
* Note that we're careful not to make any judgements about *error. * Note that we're careful not to make any judgements about *error.
*/ */
static inline bool static inline bool
xfs_scrub_should_terminate( xchk_should_terminate(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
int *error) int *error)
{ {
@ -24,121 +24,121 @@ xfs_scrub_should_terminate(
return false; return false;
} }
int xfs_scrub_trans_alloc(struct xfs_scrub_context *sc, uint resblks); int xchk_trans_alloc(struct xfs_scrub_context *sc, uint resblks);
bool xfs_scrub_process_error(struct xfs_scrub_context *sc, xfs_agnumber_t agno, bool xchk_process_error(struct xfs_scrub_context *sc, xfs_agnumber_t agno,
xfs_agblock_t bno, int *error); xfs_agblock_t bno, int *error);
bool xfs_scrub_fblock_process_error(struct xfs_scrub_context *sc, int whichfork, bool xchk_fblock_process_error(struct xfs_scrub_context *sc, int whichfork,
xfs_fileoff_t offset, int *error); xfs_fileoff_t offset, int *error);
bool xfs_scrub_xref_process_error(struct xfs_scrub_context *sc, bool xchk_xref_process_error(struct xfs_scrub_context *sc,
xfs_agnumber_t agno, xfs_agblock_t bno, int *error); xfs_agnumber_t agno, xfs_agblock_t bno, int *error);
bool xfs_scrub_fblock_xref_process_error(struct xfs_scrub_context *sc, bool xchk_fblock_xref_process_error(struct xfs_scrub_context *sc,
int whichfork, xfs_fileoff_t offset, int *error); int whichfork, xfs_fileoff_t offset, int *error);
void xfs_scrub_block_set_preen(struct xfs_scrub_context *sc, void xchk_block_set_preen(struct xfs_scrub_context *sc,
struct xfs_buf *bp); struct xfs_buf *bp);
void xfs_scrub_ino_set_preen(struct xfs_scrub_context *sc, xfs_ino_t ino); void xchk_ino_set_preen(struct xfs_scrub_context *sc, xfs_ino_t ino);
void xfs_scrub_block_set_corrupt(struct xfs_scrub_context *sc, void xchk_block_set_corrupt(struct xfs_scrub_context *sc,
struct xfs_buf *bp); struct xfs_buf *bp);
void xfs_scrub_ino_set_corrupt(struct xfs_scrub_context *sc, xfs_ino_t ino); void xchk_ino_set_corrupt(struct xfs_scrub_context *sc, xfs_ino_t ino);
void xfs_scrub_fblock_set_corrupt(struct xfs_scrub_context *sc, int whichfork, void xchk_fblock_set_corrupt(struct xfs_scrub_context *sc, int whichfork,
xfs_fileoff_t offset); xfs_fileoff_t offset);
void xfs_scrub_block_xref_set_corrupt(struct xfs_scrub_context *sc, void xchk_block_xref_set_corrupt(struct xfs_scrub_context *sc,
struct xfs_buf *bp); struct xfs_buf *bp);
void xfs_scrub_ino_xref_set_corrupt(struct xfs_scrub_context *sc, void xchk_ino_xref_set_corrupt(struct xfs_scrub_context *sc,
xfs_ino_t ino); xfs_ino_t ino);
void xfs_scrub_fblock_xref_set_corrupt(struct xfs_scrub_context *sc, void xchk_fblock_xref_set_corrupt(struct xfs_scrub_context *sc,
int whichfork, xfs_fileoff_t offset); int whichfork, xfs_fileoff_t offset);
void xfs_scrub_ino_set_warning(struct xfs_scrub_context *sc, xfs_ino_t ino); void xchk_ino_set_warning(struct xfs_scrub_context *sc, xfs_ino_t ino);
void xfs_scrub_fblock_set_warning(struct xfs_scrub_context *sc, int whichfork, void xchk_fblock_set_warning(struct xfs_scrub_context *sc, int whichfork,
xfs_fileoff_t offset); xfs_fileoff_t offset);
void xfs_scrub_set_incomplete(struct xfs_scrub_context *sc); void xchk_set_incomplete(struct xfs_scrub_context *sc);
int xfs_scrub_checkpoint_log(struct xfs_mount *mp); int xchk_checkpoint_log(struct xfs_mount *mp);
/* Are we set up for a cross-referencing check? */ /* Are we set up for a cross-referencing check? */
bool xfs_scrub_should_check_xref(struct xfs_scrub_context *sc, int *error, bool xchk_should_check_xref(struct xfs_scrub_context *sc, int *error,
struct xfs_btree_cur **curpp); struct xfs_btree_cur **curpp);
/* Setup functions */ /* Setup functions */
int xfs_scrub_setup_fs(struct xfs_scrub_context *sc, struct xfs_inode *ip); int xchk_setup_fs(struct xfs_scrub_context *sc, struct xfs_inode *ip);
int xfs_scrub_setup_ag_allocbt(struct xfs_scrub_context *sc, int xchk_setup_ag_allocbt(struct xfs_scrub_context *sc,
struct xfs_inode *ip); struct xfs_inode *ip);
int xfs_scrub_setup_ag_iallocbt(struct xfs_scrub_context *sc, int xchk_setup_ag_iallocbt(struct xfs_scrub_context *sc,
struct xfs_inode *ip); struct xfs_inode *ip);
int xfs_scrub_setup_ag_rmapbt(struct xfs_scrub_context *sc, int xchk_setup_ag_rmapbt(struct xfs_scrub_context *sc,
struct xfs_inode *ip); struct xfs_inode *ip);
int xfs_scrub_setup_ag_refcountbt(struct xfs_scrub_context *sc, int xchk_setup_ag_refcountbt(struct xfs_scrub_context *sc,
struct xfs_inode *ip); struct xfs_inode *ip);
int xfs_scrub_setup_inode(struct xfs_scrub_context *sc, int xchk_setup_inode(struct xfs_scrub_context *sc,
struct xfs_inode *ip); struct xfs_inode *ip);
int xfs_scrub_setup_inode_bmap(struct xfs_scrub_context *sc, int xchk_setup_inode_bmap(struct xfs_scrub_context *sc,
struct xfs_inode *ip); struct xfs_inode *ip);
int xfs_scrub_setup_inode_bmap_data(struct xfs_scrub_context *sc, int xchk_setup_inode_bmap_data(struct xfs_scrub_context *sc,
struct xfs_inode *ip); struct xfs_inode *ip);
int xfs_scrub_setup_directory(struct xfs_scrub_context *sc, int xchk_setup_directory(struct xfs_scrub_context *sc,
struct xfs_inode *ip); struct xfs_inode *ip);
int xfs_scrub_setup_xattr(struct xfs_scrub_context *sc, int xchk_setup_xattr(struct xfs_scrub_context *sc,
struct xfs_inode *ip); struct xfs_inode *ip);
int xfs_scrub_setup_symlink(struct xfs_scrub_context *sc, int xchk_setup_symlink(struct xfs_scrub_context *sc,
struct xfs_inode *ip); struct xfs_inode *ip);
int xfs_scrub_setup_parent(struct xfs_scrub_context *sc, int xchk_setup_parent(struct xfs_scrub_context *sc,
struct xfs_inode *ip); struct xfs_inode *ip);
#ifdef CONFIG_XFS_RT #ifdef CONFIG_XFS_RT
int xfs_scrub_setup_rt(struct xfs_scrub_context *sc, struct xfs_inode *ip); int xchk_setup_rt(struct xfs_scrub_context *sc, struct xfs_inode *ip);
#else #else
static inline int static inline int
xfs_scrub_setup_rt(struct xfs_scrub_context *sc, struct xfs_inode *ip) xchk_setup_rt(struct xfs_scrub_context *sc, struct xfs_inode *ip)
{ {
return -ENOENT; return -ENOENT;
} }
#endif #endif
#ifdef CONFIG_XFS_QUOTA #ifdef CONFIG_XFS_QUOTA
int xfs_scrub_setup_quota(struct xfs_scrub_context *sc, struct xfs_inode *ip); int xchk_setup_quota(struct xfs_scrub_context *sc, struct xfs_inode *ip);
#else #else
static inline int static inline int
xfs_scrub_setup_quota(struct xfs_scrub_context *sc, struct xfs_inode *ip) xchk_setup_quota(struct xfs_scrub_context *sc, struct xfs_inode *ip)
{ {
return -ENOENT; return -ENOENT;
} }
#endif #endif
void xfs_scrub_ag_free(struct xfs_scrub_context *sc, struct xfs_scrub_ag *sa); void xchk_ag_free(struct xfs_scrub_context *sc, struct xchk_ag *sa);
int xfs_scrub_ag_init(struct xfs_scrub_context *sc, xfs_agnumber_t agno, int xchk_ag_init(struct xfs_scrub_context *sc, xfs_agnumber_t agno,
struct xfs_scrub_ag *sa); struct xchk_ag *sa);
void xfs_scrub_perag_get(struct xfs_mount *mp, struct xfs_scrub_ag *sa); void xchk_perag_get(struct xfs_mount *mp, struct xchk_ag *sa);
int xfs_scrub_ag_read_headers(struct xfs_scrub_context *sc, xfs_agnumber_t agno, int xchk_ag_read_headers(struct xfs_scrub_context *sc, xfs_agnumber_t agno,
struct xfs_buf **agi, struct xfs_buf **agf, struct xfs_buf **agi, struct xfs_buf **agf,
struct xfs_buf **agfl); struct xfs_buf **agfl);
void xfs_scrub_ag_btcur_free(struct xfs_scrub_ag *sa); void xchk_ag_btcur_free(struct xchk_ag *sa);
int xfs_scrub_ag_btcur_init(struct xfs_scrub_context *sc, int xchk_ag_btcur_init(struct xfs_scrub_context *sc,
struct xfs_scrub_ag *sa); struct xchk_ag *sa);
int xfs_scrub_count_rmap_ownedby_ag(struct xfs_scrub_context *sc, int xchk_count_rmap_ownedby_ag(struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
struct xfs_owner_info *oinfo, struct xfs_owner_info *oinfo,
xfs_filblks_t *blocks); xfs_filblks_t *blocks);
int xfs_scrub_setup_ag_btree(struct xfs_scrub_context *sc, int xchk_setup_ag_btree(struct xfs_scrub_context *sc,
struct xfs_inode *ip, bool force_log); struct xfs_inode *ip, bool force_log);
int xfs_scrub_get_inode(struct xfs_scrub_context *sc, struct xfs_inode *ip_in); int xchk_get_inode(struct xfs_scrub_context *sc, struct xfs_inode *ip_in);
int xfs_scrub_setup_inode_contents(struct xfs_scrub_context *sc, int xchk_setup_inode_contents(struct xfs_scrub_context *sc,
struct xfs_inode *ip, unsigned int resblks); struct xfs_inode *ip, unsigned int resblks);
void xfs_scrub_buffer_recheck(struct xfs_scrub_context *sc, struct xfs_buf *bp); void xchk_buffer_recheck(struct xfs_scrub_context *sc, struct xfs_buf *bp);
/* /*
* Don't bother cross-referencing if we already found corruption or cross * Don't bother cross-referencing if we already found corruption or cross
* referencing discrepancies. * referencing discrepancies.
*/ */
static inline bool xfs_scrub_skip_xref(struct xfs_scrub_metadata *sm) static inline bool xchk_skip_xref(struct xfs_scrub_metadata *sm)
{ {
return sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT | return sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT |
XFS_SCRUB_OFLAG_XCORRUPT); XFS_SCRUB_OFLAG_XCORRUPT);
} }
int xfs_scrub_metadata_inode_forks(struct xfs_scrub_context *sc); int xchk_metadata_inode_forks(struct xfs_scrub_context *sc);
int xfs_scrub_ilock_inverted(struct xfs_inode *ip, uint lock_mode); int xchk_ilock_inverted(struct xfs_inode *ip, uint lock_mode);
#endif /* __XFS_SCRUB_COMMON_H__ */ #endif /* __XFS_SCRUB_COMMON_H__ */
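Cross-reference checks combine xchk_should_check_xref() with the *_xref_set_corrupt() setters: the first absorbs lookup errors (and honors xchk_skip_xref() so we stop cross-referencing once corruption is known), the second records a discrepancy without failing the primary check. A sketch against the declarations above; xchk_example_lookup() and the extent being verified are hypothetical, while sc->sa.refc_cur is one of the cursors set up by xchk_ag_btcur_init():

STATIC void
xchk_xref_example(
        struct xfs_scrub_context        *sc,
        struct xfs_buf                  *bp,
        xfs_agblock_t                   agbno,
        xfs_extlen_t                    len)
{
        bool                            has_rec;
        int                             error;

        /* Hypothetical lookup of this extent in the refcount btree. */
        error = xchk_example_lookup(sc->sa.refc_cur, agbno, len, &has_rec);
        if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
                return;
        if (!has_rec)
                xchk_block_xref_set_corrupt(sc, bp);
}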

View File

@ -35,8 +35,8 @@
* operational errors in common.c. * operational errors in common.c.
*/ */
bool bool
xfs_scrub_da_process_error( xchk_da_process_error(
struct xfs_scrub_da_btree *ds, struct xchk_da_btree *ds,
int level, int level,
int *error) int *error)
{ {
@ -48,7 +48,7 @@ xfs_scrub_da_process_error(
switch (*error) { switch (*error) {
case -EDEADLOCK: case -EDEADLOCK:
/* Used to restart an op with deadlock avoidance. */ /* Used to restart an op with deadlock avoidance. */
trace_xfs_scrub_deadlock_retry(sc->ip, sc->sm, *error); trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
break; break;
case -EFSBADCRC: case -EFSBADCRC:
case -EFSCORRUPTED: case -EFSCORRUPTED:
@ -57,7 +57,7 @@ xfs_scrub_da_process_error(
*error = 0; *error = 0;
/* fall through */ /* fall through */
default: default:
trace_xfs_scrub_file_op_error(sc, ds->dargs.whichfork, trace_xchk_file_op_error(sc, ds->dargs.whichfork,
xfs_dir2_da_to_db(ds->dargs.geo, xfs_dir2_da_to_db(ds->dargs.geo,
ds->state->path.blk[level].blkno), ds->state->path.blk[level].blkno),
*error, __return_address); *error, __return_address);
@ -71,15 +71,15 @@ xfs_scrub_da_process_error(
* operational errors in common.c. * operational errors in common.c.
*/ */
void void
xfs_scrub_da_set_corrupt( xchk_da_set_corrupt(
struct xfs_scrub_da_btree *ds, struct xchk_da_btree *ds,
int level) int level)
{ {
struct xfs_scrub_context *sc = ds->sc; struct xfs_scrub_context *sc = ds->sc;
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT; sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
trace_xfs_scrub_fblock_error(sc, ds->dargs.whichfork, trace_xchk_fblock_error(sc, ds->dargs.whichfork,
xfs_dir2_da_to_db(ds->dargs.geo, xfs_dir2_da_to_db(ds->dargs.geo,
ds->state->path.blk[level].blkno), ds->state->path.blk[level].blkno),
__return_address); __return_address);
@ -87,8 +87,8 @@ xfs_scrub_da_set_corrupt(
/* Find an entry at a certain level in a da btree. */ /* Find an entry at a certain level in a da btree. */
STATIC void * STATIC void *
xfs_scrub_da_btree_entry( xchk_da_btree_entry(
struct xfs_scrub_da_btree *ds, struct xchk_da_btree *ds,
int level, int level,
int rec) int rec)
{ {
@ -123,8 +123,8 @@ xfs_scrub_da_btree_entry(
/* Scrub a da btree hash (key). */ /* Scrub a da btree hash (key). */
int int
xfs_scrub_da_btree_hash( xchk_da_btree_hash(
struct xfs_scrub_da_btree *ds, struct xchk_da_btree *ds,
int level, int level,
__be32 *hashp) __be32 *hashp)
{ {
@ -136,7 +136,7 @@ xfs_scrub_da_btree_hash(
/* Is this hash in order? */ /* Is this hash in order? */
hash = be32_to_cpu(*hashp); hash = be32_to_cpu(*hashp);
if (hash < ds->hashes[level]) if (hash < ds->hashes[level])
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
ds->hashes[level] = hash; ds->hashes[level] = hash;
if (level == 0) if (level == 0)
@ -144,10 +144,10 @@ xfs_scrub_da_btree_hash(
/* Is this hash no larger than the parent hash? */ /* Is this hash no larger than the parent hash? */
blks = ds->state->path.blk; blks = ds->state->path.blk;
entry = xfs_scrub_da_btree_entry(ds, level - 1, blks[level - 1].index); entry = xchk_da_btree_entry(ds, level - 1, blks[level - 1].index);
parent_hash = be32_to_cpu(entry->hashval); parent_hash = be32_to_cpu(entry->hashval);
if (parent_hash < hash) if (parent_hash < hash)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
return 0; return 0;
} }
@ -157,13 +157,13 @@ xfs_scrub_da_btree_hash(
* pointer. * pointer.
*/ */
STATIC bool STATIC bool
xfs_scrub_da_btree_ptr_ok( xchk_da_btree_ptr_ok(
struct xfs_scrub_da_btree *ds, struct xchk_da_btree *ds,
int level, int level,
xfs_dablk_t blkno) xfs_dablk_t blkno)
{ {
if (blkno < ds->lowest || (ds->highest != 0 && blkno >= ds->highest)) { if (blkno < ds->lowest || (ds->highest != 0 && blkno >= ds->highest)) {
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
return false; return false;
} }
@ -176,7 +176,7 @@ xfs_scrub_da_btree_ptr_ok(
* leaf1, we must multiplex the verifiers. * leaf1, we must multiplex the verifiers.
*/ */
static void static void
xfs_scrub_da_btree_read_verify( xchk_da_btree_read_verify(
struct xfs_buf *bp) struct xfs_buf *bp)
{ {
struct xfs_da_blkinfo *info = bp->b_addr; struct xfs_da_blkinfo *info = bp->b_addr;
@ -198,7 +198,7 @@ xfs_scrub_da_btree_read_verify(
} }
} }
static void static void
xfs_scrub_da_btree_write_verify( xchk_da_btree_write_verify(
struct xfs_buf *bp) struct xfs_buf *bp)
{ {
struct xfs_da_blkinfo *info = bp->b_addr; struct xfs_da_blkinfo *info = bp->b_addr;
@ -220,7 +220,7 @@ xfs_scrub_da_btree_write_verify(
} }
} }
static void * static void *
xfs_scrub_da_btree_verify( xchk_da_btree_verify(
struct xfs_buf *bp) struct xfs_buf *bp)
{ {
struct xfs_da_blkinfo *info = bp->b_addr; struct xfs_da_blkinfo *info = bp->b_addr;
@ -236,17 +236,17 @@ xfs_scrub_da_btree_verify(
} }
} }
static const struct xfs_buf_ops xfs_scrub_da_btree_buf_ops = { static const struct xfs_buf_ops xchk_da_btree_buf_ops = {
.name = "xfs_scrub_da_btree", .name = "xchk_da_btree",
.verify_read = xfs_scrub_da_btree_read_verify, .verify_read = xchk_da_btree_read_verify,
.verify_write = xfs_scrub_da_btree_write_verify, .verify_write = xchk_da_btree_write_verify,
.verify_struct = xfs_scrub_da_btree_verify, .verify_struct = xchk_da_btree_verify,
}; };
/* Check a block's sibling. */ /* Check a block's sibling. */
STATIC int STATIC int
xfs_scrub_da_btree_block_check_sibling( xchk_da_btree_block_check_sibling(
struct xfs_scrub_da_btree *ds, struct xchk_da_btree *ds,
int level, int level,
int direction, int direction,
xfs_dablk_t sibling) xfs_dablk_t sibling)
@ -265,7 +265,7 @@ xfs_scrub_da_btree_block_check_sibling(
error = xfs_da3_path_shift(ds->state, &ds->state->altpath, error = xfs_da3_path_shift(ds->state, &ds->state->altpath,
direction, false, &retval); direction, false, &retval);
if (error == 0 && retval == 0) if (error == 0 && retval == 0)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
error = 0; error = 0;
goto out; goto out;
} }
@ -273,19 +273,19 @@ xfs_scrub_da_btree_block_check_sibling(
/* Move the alternate cursor one block in the direction given. */ /* Move the alternate cursor one block in the direction given. */
error = xfs_da3_path_shift(ds->state, &ds->state->altpath, error = xfs_da3_path_shift(ds->state, &ds->state->altpath,
direction, false, &retval); direction, false, &retval);
if (!xfs_scrub_da_process_error(ds, level, &error)) if (!xchk_da_process_error(ds, level, &error))
return error; return error;
if (retval) { if (retval) {
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
return error; return error;
} }
if (ds->state->altpath.blk[level].bp) if (ds->state->altpath.blk[level].bp)
xfs_scrub_buffer_recheck(ds->sc, xchk_buffer_recheck(ds->sc,
ds->state->altpath.blk[level].bp); ds->state->altpath.blk[level].bp);
/* Compare upper level pointer to sibling pointer. */ /* Compare upper level pointer to sibling pointer. */
if (ds->state->altpath.blk[level].blkno != sibling) if (ds->state->altpath.blk[level].blkno != sibling)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
xfs_trans_brelse(ds->dargs.trans, ds->state->altpath.blk[level].bp); xfs_trans_brelse(ds->dargs.trans, ds->state->altpath.blk[level].bp);
out: out:
return error; return error;
@ -293,8 +293,8 @@ xfs_scrub_da_btree_block_check_sibling(
/* Check a block's sibling pointers. */ /* Check a block's sibling pointers. */
STATIC int STATIC int
xfs_scrub_da_btree_block_check_siblings( xchk_da_btree_block_check_siblings(
struct xfs_scrub_da_btree *ds, struct xchk_da_btree *ds,
int level, int level,
struct xfs_da_blkinfo *hdr) struct xfs_da_blkinfo *hdr)
{ {
@ -308,7 +308,7 @@ xfs_scrub_da_btree_block_check_siblings(
/* Top level blocks should not have sibling pointers. */ /* Top level blocks should not have sibling pointers. */
if (level == 0) { if (level == 0) {
if (forw != 0 || back != 0) if (forw != 0 || back != 0)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
return 0; return 0;
} }
@ -316,10 +316,10 @@ xfs_scrub_da_btree_block_check_siblings(
* Check back (left) and forw (right) pointers. These functions * Check back (left) and forw (right) pointers. These functions
* absorb error codes for us. * absorb error codes for us.
*/ */
error = xfs_scrub_da_btree_block_check_sibling(ds, level, 0, back); error = xchk_da_btree_block_check_sibling(ds, level, 0, back);
if (error) if (error)
goto out; goto out;
error = xfs_scrub_da_btree_block_check_sibling(ds, level, 1, forw); error = xchk_da_btree_block_check_sibling(ds, level, 1, forw);
out: out:
memset(&ds->state->altpath, 0, sizeof(ds->state->altpath)); memset(&ds->state->altpath, 0, sizeof(ds->state->altpath));
@ -328,8 +328,8 @@ xfs_scrub_da_btree_block_check_siblings(
/* Load a dir/attribute block from a btree. */ /* Load a dir/attribute block from a btree. */
STATIC int STATIC int
xfs_scrub_da_btree_block( xchk_da_btree_block(
struct xfs_scrub_da_btree *ds, struct xchk_da_btree *ds,
int level, int level,
xfs_dablk_t blkno) xfs_dablk_t blkno)
{ {
@ -355,17 +355,17 @@ xfs_scrub_da_btree_block(
/* Check the pointer. */ /* Check the pointer. */
blk->blkno = blkno; blk->blkno = blkno;
if (!xfs_scrub_da_btree_ptr_ok(ds, level, blkno)) if (!xchk_da_btree_ptr_ok(ds, level, blkno))
goto out_nobuf; goto out_nobuf;
/* Read the buffer. */ /* Read the buffer. */
error = xfs_da_read_buf(dargs->trans, dargs->dp, blk->blkno, -2, error = xfs_da_read_buf(dargs->trans, dargs->dp, blk->blkno, -2,
&blk->bp, dargs->whichfork, &blk->bp, dargs->whichfork,
&xfs_scrub_da_btree_buf_ops); &xchk_da_btree_buf_ops);
if (!xfs_scrub_da_process_error(ds, level, &error)) if (!xchk_da_process_error(ds, level, &error))
goto out_nobuf; goto out_nobuf;
if (blk->bp) if (blk->bp)
xfs_scrub_buffer_recheck(ds->sc, blk->bp); xchk_buffer_recheck(ds->sc, blk->bp);
/* /*
* We didn't find a dir btree root block, which means that * We didn't find a dir btree root block, which means that
@ -378,7 +378,7 @@ xfs_scrub_da_btree_block(
/* It's /not/ ok for attr trees not to have a da btree. */ /* It's /not/ ok for attr trees not to have a da btree. */
if (blk->bp == NULL) { if (blk->bp == NULL) {
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
goto out_nobuf; goto out_nobuf;
} }
@ -388,17 +388,17 @@ xfs_scrub_da_btree_block(
/* We only started zeroing the header on v5 filesystems. */ /* We only started zeroing the header on v5 filesystems. */
if (xfs_sb_version_hascrc(&ds->sc->mp->m_sb) && hdr3->hdr.pad) if (xfs_sb_version_hascrc(&ds->sc->mp->m_sb) && hdr3->hdr.pad)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
/* Check the owner. */ /* Check the owner. */
if (xfs_sb_version_hascrc(&ip->i_mount->m_sb)) { if (xfs_sb_version_hascrc(&ip->i_mount->m_sb)) {
owner = be64_to_cpu(hdr3->owner); owner = be64_to_cpu(hdr3->owner);
if (owner != ip->i_ino) if (owner != ip->i_ino)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
} }
/* Check the siblings. */ /* Check the siblings. */
error = xfs_scrub_da_btree_block_check_siblings(ds, level, &hdr3->hdr); error = xchk_da_btree_block_check_siblings(ds, level, &hdr3->hdr);
if (error) if (error)
goto out; goto out;
@ -411,7 +411,7 @@ xfs_scrub_da_btree_block(
blk->magic = XFS_ATTR_LEAF_MAGIC; blk->magic = XFS_ATTR_LEAF_MAGIC;
blk->hashval = xfs_attr_leaf_lasthash(blk->bp, pmaxrecs); blk->hashval = xfs_attr_leaf_lasthash(blk->bp, pmaxrecs);
if (ds->tree_level != 0) if (ds->tree_level != 0)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
break; break;
case XFS_DIR2_LEAFN_MAGIC: case XFS_DIR2_LEAFN_MAGIC:
case XFS_DIR3_LEAFN_MAGIC: case XFS_DIR3_LEAFN_MAGIC:
@ -420,7 +420,7 @@ xfs_scrub_da_btree_block(
blk->magic = XFS_DIR2_LEAFN_MAGIC; blk->magic = XFS_DIR2_LEAFN_MAGIC;
blk->hashval = xfs_dir2_leaf_lasthash(ip, blk->bp, pmaxrecs); blk->hashval = xfs_dir2_leaf_lasthash(ip, blk->bp, pmaxrecs);
if (ds->tree_level != 0) if (ds->tree_level != 0)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
break; break;
case XFS_DIR2_LEAF1_MAGIC: case XFS_DIR2_LEAF1_MAGIC:
case XFS_DIR3_LEAF1_MAGIC: case XFS_DIR3_LEAF1_MAGIC:
@ -429,7 +429,7 @@ xfs_scrub_da_btree_block(
blk->magic = XFS_DIR2_LEAF1_MAGIC; blk->magic = XFS_DIR2_LEAF1_MAGIC;
blk->hashval = xfs_dir2_leaf_lasthash(ip, blk->bp, pmaxrecs); blk->hashval = xfs_dir2_leaf_lasthash(ip, blk->bp, pmaxrecs);
if (ds->tree_level != 0) if (ds->tree_level != 0)
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
break; break;
case XFS_DA_NODE_MAGIC: case XFS_DA_NODE_MAGIC:
case XFS_DA3_NODE_MAGIC: case XFS_DA3_NODE_MAGIC:
@ -443,13 +443,13 @@ xfs_scrub_da_btree_block(
blk->hashval = be32_to_cpu(btree[*pmaxrecs - 1].hashval); blk->hashval = be32_to_cpu(btree[*pmaxrecs - 1].hashval);
if (level == 0) { if (level == 0) {
if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH) { if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH) {
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
goto out_freebp; goto out_freebp;
} }
ds->tree_level = nodehdr.level; ds->tree_level = nodehdr.level;
} else { } else {
if (ds->tree_level != nodehdr.level) { if (ds->tree_level != nodehdr.level) {
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
goto out_freebp; goto out_freebp;
} }
} }
@ -457,7 +457,7 @@ xfs_scrub_da_btree_block(
/* XXX: Check hdr3.pad32 once we know how to fix it. */ /* XXX: Check hdr3.pad32 once we know how to fix it. */
break; break;
default: default:
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
goto out_freebp; goto out_freebp;
} }
@ -473,13 +473,13 @@ xfs_scrub_da_btree_block(
/* Visit all nodes and leaves of a da btree. */ /* Visit all nodes and leaves of a da btree. */
int int
xfs_scrub_da_btree( xchk_da_btree(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
int whichfork, int whichfork,
xfs_scrub_da_btree_rec_fn scrub_fn, xchk_da_btree_rec_fn scrub_fn,
void *private) void *private)
{ {
struct xfs_scrub_da_btree ds = {}; struct xchk_da_btree ds = {};
struct xfs_mount *mp = sc->mp; struct xfs_mount *mp = sc->mp;
struct xfs_da_state_blk *blks; struct xfs_da_state_blk *blks;
struct xfs_da_node_entry *key; struct xfs_da_node_entry *key;
@ -517,7 +517,7 @@ xfs_scrub_da_btree(
/* Find the root of the da tree, if present. */ /* Find the root of the da tree, if present. */
blks = ds.state->path.blk; blks = ds.state->path.blk;
error = xfs_scrub_da_btree_block(&ds, level, blkno); error = xchk_da_btree_block(&ds, level, blkno);
if (error) if (error)
goto out_state; goto out_state;
/* /*
@ -542,12 +542,12 @@ xfs_scrub_da_btree(
} }
/* Dispatch record scrubbing. */ /* Dispatch record scrubbing. */
rec = xfs_scrub_da_btree_entry(&ds, level, rec = xchk_da_btree_entry(&ds, level,
blks[level].index); blks[level].index);
error = scrub_fn(&ds, level, rec); error = scrub_fn(&ds, level, rec);
if (error) if (error)
break; break;
if (xfs_scrub_should_terminate(sc, &error) || if (xchk_should_terminate(sc, &error) ||
(sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
break; break;
@ -566,8 +566,8 @@ xfs_scrub_da_btree(
} }
/* Hashes in order for scrub? */ /* Hashes in order for scrub? */
key = xfs_scrub_da_btree_entry(&ds, level, blks[level].index); key = xchk_da_btree_entry(&ds, level, blks[level].index);
error = xfs_scrub_da_btree_hash(&ds, level, &key->hashval); error = xchk_da_btree_hash(&ds, level, &key->hashval);
if (error) if (error)
goto out; goto out;
@ -575,7 +575,7 @@ xfs_scrub_da_btree(
blkno = be32_to_cpu(key->before); blkno = be32_to_cpu(key->before);
level++; level++;
ds.tree_level--; ds.tree_level--;
error = xfs_scrub_da_btree_block(&ds, level, blkno); error = xchk_da_btree_block(&ds, level, blkno);
if (error) if (error)
goto out; goto out;
if (blks[level].bp == NULL) if (blks[level].bp == NULL)

View File

@ -8,7 +8,7 @@
/* dir/attr btree */ /* dir/attr btree */
struct xfs_scrub_da_btree { struct xchk_da_btree {
struct xfs_da_args dargs; struct xfs_da_args dargs;
xfs_dahash_t hashes[XFS_DA_NODE_MAXDEPTH]; xfs_dahash_t hashes[XFS_DA_NODE_MAXDEPTH];
int maxrecs[XFS_DA_NODE_MAXDEPTH]; int maxrecs[XFS_DA_NODE_MAXDEPTH];
@ -28,18 +28,18 @@ struct xfs_scrub_da_btree {
int tree_level; int tree_level;
}; };
typedef int (*xfs_scrub_da_btree_rec_fn)(struct xfs_scrub_da_btree *ds, typedef int (*xchk_da_btree_rec_fn)(struct xchk_da_btree *ds,
int level, void *rec); int level, void *rec);
/* Check for da btree operation errors. */ /* Check for da btree operation errors. */
bool xfs_scrub_da_process_error(struct xfs_scrub_da_btree *ds, int level, int *error); bool xchk_da_process_error(struct xchk_da_btree *ds, int level, int *error);
/* Check for da btree corruption. */ /* Check for da btree corruption. */
void xfs_scrub_da_set_corrupt(struct xfs_scrub_da_btree *ds, int level); void xchk_da_set_corrupt(struct xchk_da_btree *ds, int level);
int xfs_scrub_da_btree_hash(struct xfs_scrub_da_btree *ds, int level, int xchk_da_btree_hash(struct xchk_da_btree *ds, int level,
__be32 *hashp); __be32 *hashp);
int xfs_scrub_da_btree(struct xfs_scrub_context *sc, int whichfork, int xchk_da_btree(struct xfs_scrub_context *sc, int whichfork,
xfs_scrub_da_btree_rec_fn scrub_fn, void *private); xchk_da_btree_rec_fn scrub_fn, void *private);
#endif /* __XFS_SCRUB_DABTREE_H__ */ #endif /* __XFS_SCRUB_DABTREE_H__ */
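Callers of this header supply a record callback and let xchk_da_btree() drive the walk. A sketch of that shape; xchk_example_rec() and its assumption that the record starts with a hash are hypothetical, only the declarations above are from the patch:

/* Hypothetical record callback matching xchk_da_btree_rec_fn. */
STATIC int
xchk_example_rec(
        struct xchk_da_btree            *ds,
        int                             level,
        void                            *rec)
{
        __be32                          *hashp = rec;   /* assume hash-first record layout */
        int                             error;

        /* Hash ordering is verified by the core helper. */
        error = xchk_da_btree_hash(ds, level, hashp);
        if (error)
                return error;

        /* ... validate the record payload; call xchk_da_set_corrupt(ds, level) on failure ... */
        return 0;
}

A scrubber would then invoke xchk_da_btree(sc, XFS_DATA_FORK, xchk_example_rec, NULL) to visit every node and leaf record in the fork.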

View File

@ -31,18 +31,18 @@
/* Set us up to scrub directories. */ /* Set us up to scrub directories. */
int int
xfs_scrub_setup_directory( xchk_setup_directory(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_inode *ip) struct xfs_inode *ip)
{ {
return xfs_scrub_setup_inode_contents(sc, ip, 0); return xchk_setup_inode_contents(sc, ip, 0);
} }
/* Directories */ /* Directories */
/* Scrub a directory entry. */ /* Scrub a directory entry. */
struct xfs_scrub_dir_ctx { struct xchk_dir_ctx {
/* VFS fill-directory iterator */ /* VFS fill-directory iterator */
struct dir_context dir_iter; struct dir_context dir_iter;
@ -51,8 +51,8 @@ struct xfs_scrub_dir_ctx {
/* Check that an inode's mode matches a given DT_ type. */ /* Check that an inode's mode matches a given DT_ type. */
STATIC int STATIC int
xfs_scrub_dir_check_ftype( xchk_dir_check_ftype(
struct xfs_scrub_dir_ctx *sdc, struct xchk_dir_ctx *sdc,
xfs_fileoff_t offset, xfs_fileoff_t offset,
xfs_ino_t inum, xfs_ino_t inum,
int dtype) int dtype)
@ -64,7 +64,7 @@ xfs_scrub_dir_check_ftype(
if (!xfs_sb_version_hasftype(&mp->m_sb)) { if (!xfs_sb_version_hasftype(&mp->m_sb)) {
if (dtype != DT_UNKNOWN && dtype != DT_DIR) if (dtype != DT_UNKNOWN && dtype != DT_DIR)
xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
offset); offset);
goto out; goto out;
} }
@ -78,7 +78,7 @@ xfs_scrub_dir_check_ftype(
* inodes can trigger immediate inactive cleanup of the inode. * inodes can trigger immediate inactive cleanup of the inode.
*/ */
error = xfs_iget(mp, sdc->sc->tp, inum, 0, 0, &ip); error = xfs_iget(mp, sdc->sc->tp, inum, 0, 0, &ip);
if (!xfs_scrub_fblock_xref_process_error(sdc->sc, XFS_DATA_FORK, offset, if (!xchk_fblock_xref_process_error(sdc->sc, XFS_DATA_FORK, offset,
&error)) &error))
goto out; goto out;
@ -86,7 +86,7 @@ xfs_scrub_dir_check_ftype(
ino_dtype = xfs_dir3_get_dtype(mp, ino_dtype = xfs_dir3_get_dtype(mp,
xfs_mode_to_ftype(VFS_I(ip)->i_mode)); xfs_mode_to_ftype(VFS_I(ip)->i_mode));
if (ino_dtype != dtype) if (ino_dtype != dtype)
xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset); xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset);
iput(VFS_I(ip)); iput(VFS_I(ip));
out: out:
return error; return error;
@ -101,7 +101,7 @@ xfs_scrub_dir_check_ftype(
* we can look up this filename. Finally, we check the ftype. * we can look up this filename. Finally, we check the ftype.
*/ */
STATIC int STATIC int
xfs_scrub_dir_actor( xchk_dir_actor(
struct dir_context *dir_iter, struct dir_context *dir_iter,
const char *name, const char *name,
int namelen, int namelen,
@ -111,13 +111,13 @@ xfs_scrub_dir_actor(
{ {
struct xfs_mount *mp; struct xfs_mount *mp;
struct xfs_inode *ip; struct xfs_inode *ip;
struct xfs_scrub_dir_ctx *sdc; struct xchk_dir_ctx *sdc;
struct xfs_name xname; struct xfs_name xname;
xfs_ino_t lookup_ino; xfs_ino_t lookup_ino;
xfs_dablk_t offset; xfs_dablk_t offset;
int error = 0; int error = 0;
sdc = container_of(dir_iter, struct xfs_scrub_dir_ctx, dir_iter); sdc = container_of(dir_iter, struct xchk_dir_ctx, dir_iter);
ip = sdc->sc->ip; ip = sdc->sc->ip;
mp = ip->i_mount; mp = ip->i_mount;
offset = xfs_dir2_db_to_da(mp->m_dir_geo, offset = xfs_dir2_db_to_da(mp->m_dir_geo,
@ -125,17 +125,17 @@ xfs_scrub_dir_actor(
/* Does this inode number make sense? */ /* Does this inode number make sense? */
if (!xfs_verify_dir_ino(mp, ino)) { if (!xfs_verify_dir_ino(mp, ino)) {
xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset); xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset);
goto out; goto out;
} }
if (!strncmp(".", name, namelen)) { if (!strncmp(".", name, namelen)) {
/* If this is "." then check that the inum matches the dir. */ /* If this is "." then check that the inum matches the dir. */
if (xfs_sb_version_hasftype(&mp->m_sb) && type != DT_DIR) if (xfs_sb_version_hasftype(&mp->m_sb) && type != DT_DIR)
xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
offset); offset);
if (ino != ip->i_ino) if (ino != ip->i_ino)
xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
offset); offset);
} else if (!strncmp("..", name, namelen)) { } else if (!strncmp("..", name, namelen)) {
/* /*
@ -143,10 +143,10 @@ xfs_scrub_dir_actor(
* matches this dir. * matches this dir.
*/ */
if (xfs_sb_version_hasftype(&mp->m_sb) && type != DT_DIR) if (xfs_sb_version_hasftype(&mp->m_sb) && type != DT_DIR)
xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
offset); offset);
if (ip->i_ino == mp->m_sb.sb_rootino && ino != ip->i_ino) if (ip->i_ino == mp->m_sb.sb_rootino && ino != ip->i_ino)
xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
offset); offset);
} }
@ -156,23 +156,23 @@ xfs_scrub_dir_actor(
xname.type = XFS_DIR3_FT_UNKNOWN; xname.type = XFS_DIR3_FT_UNKNOWN;
error = xfs_dir_lookup(sdc->sc->tp, ip, &xname, &lookup_ino, NULL); error = xfs_dir_lookup(sdc->sc->tp, ip, &xname, &lookup_ino, NULL);
if (!xfs_scrub_fblock_process_error(sdc->sc, XFS_DATA_FORK, offset, if (!xchk_fblock_process_error(sdc->sc, XFS_DATA_FORK, offset,
&error)) &error))
goto out; goto out;
if (lookup_ino != ino) { if (lookup_ino != ino) {
xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset); xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset);
goto out; goto out;
} }
/* Verify the file type. This function absorbs error codes. */ /* Verify the file type. This function absorbs error codes. */
error = xfs_scrub_dir_check_ftype(sdc, offset, lookup_ino, type); error = xchk_dir_check_ftype(sdc, offset, lookup_ino, type);
if (error) if (error)
goto out; goto out;
out: out:
/* /*
* A negative error code returned here is supposed to cause the * A negative error code returned here is supposed to cause the
* dir_emit caller (xfs_readdir) to abort the directory iteration * dir_emit caller (xfs_readdir) to abort the directory iteration
* and return zero to xfs_scrub_directory. * and return zero to xchk_directory.
*/ */
if (error == 0 && sdc->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) if (error == 0 && sdc->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return -EFSCORRUPTED; return -EFSCORRUPTED;
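The actor above is driven through the VFS fill-directory machinery, which is why a negative return aborts the iteration as the comment describes. A sketch of the wiring on the xchk_directory side (that function is not part of this hunk), assuming xfs_readdir() takes the scrub transaction and with an illustrative buffer size only:

STATIC int
xchk_example_dir_walk(
        struct xfs_scrub_context        *sc)
{
        struct xchk_dir_ctx             sdc = {
                .dir_iter.actor = xchk_dir_actor,
                .dir_iter.pos = 0,
                .sc = sc,
        };

        /*
         * Hand the actor to the directory iterator; it calls back once per
         * entry.  Buffer size here is illustrative, not the real heuristic.
         */
        return xfs_readdir(sc->tp, sc->ip, &sdc.dir_iter,
                        sc->mp->m_dir_geo->blksize);
}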
@ -181,8 +181,8 @@ xfs_scrub_dir_actor(
/* Scrub a directory btree record. */ /* Scrub a directory btree record. */
STATIC int STATIC int
xfs_scrub_dir_rec( xchk_dir_rec(
struct xfs_scrub_da_btree *ds, struct xchk_da_btree *ds,
int level, int level,
void *rec) void *rec)
{ {
@ -203,7 +203,7 @@ xfs_scrub_dir_rec(
int error; int error;
/* Check the hash of the entry. */ /* Check the hash of the entry. */
error = xfs_scrub_da_btree_hash(ds, level, &ent->hashval); error = xchk_da_btree_hash(ds, level, &ent->hashval);
if (error) if (error)
goto out; goto out;
@ -218,18 +218,18 @@ xfs_scrub_dir_rec(
rec_bno = xfs_dir2_db_to_da(mp->m_dir_geo, db); rec_bno = xfs_dir2_db_to_da(mp->m_dir_geo, db);
if (rec_bno >= mp->m_dir_geo->leafblk) { if (rec_bno >= mp->m_dir_geo->leafblk) {
xfs_scrub_da_set_corrupt(ds, level); xchk_da_set_corrupt(ds, level);
goto out; goto out;
} }
error = xfs_dir3_data_read(ds->dargs.trans, dp, rec_bno, -2, &bp); error = xfs_dir3_data_read(ds->dargs.trans, dp, rec_bno, -2, &bp);
if (!xfs_scrub_fblock_process_error(ds->sc, XFS_DATA_FORK, rec_bno, if (!xchk_fblock_process_error(ds->sc, XFS_DATA_FORK, rec_bno,
&error)) &error))
goto out; goto out;
if (!bp) { if (!bp) {
xfs_scrub_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno); xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
goto out; goto out;
} }
xfs_scrub_buffer_recheck(ds->sc, bp); xchk_buffer_recheck(ds->sc, bp);
if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out_relse; goto out_relse;
@ -240,7 +240,7 @@ xfs_scrub_dir_rec(
p = (char *)mp->m_dir_inode_ops->data_entry_p(bp->b_addr); p = (char *)mp->m_dir_inode_ops->data_entry_p(bp->b_addr);
endp = xfs_dir3_data_endp(mp->m_dir_geo, bp->b_addr); endp = xfs_dir3_data_endp(mp->m_dir_geo, bp->b_addr);
if (!endp) { if (!endp) {
xfs_scrub_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno); xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
goto out_relse; goto out_relse;
} }
while (p < endp) { while (p < endp) {
@ -258,7 +258,7 @@ xfs_scrub_dir_rec(
p += mp->m_dir_inode_ops->data_entsize(dep->namelen); p += mp->m_dir_inode_ops->data_entsize(dep->namelen);
} }
if (p >= endp) { if (p >= endp) {
xfs_scrub_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno); xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
goto out_relse; goto out_relse;
} }
@ -267,14 +267,14 @@ xfs_scrub_dir_rec(
hash = be32_to_cpu(ent->hashval); hash = be32_to_cpu(ent->hashval);
tag = be16_to_cpup(dp->d_ops->data_entry_tag_p(dent)); tag = be16_to_cpup(dp->d_ops->data_entry_tag_p(dent));
if (!xfs_verify_dir_ino(mp, ino) || tag != off) if (!xfs_verify_dir_ino(mp, ino) || tag != off)
xfs_scrub_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno); xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
if (dent->namelen == 0) { if (dent->namelen == 0) {
xfs_scrub_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno); xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
goto out_relse; goto out_relse;
} }
calc_hash = xfs_da_hashname(dent->name, dent->namelen); calc_hash = xfs_da_hashname(dent->name, dent->namelen);
if (calc_hash != hash) if (calc_hash != hash)
xfs_scrub_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno); xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
out_relse: out_relse:
xfs_trans_brelse(ds->dargs.trans, bp); xfs_trans_brelse(ds->dargs.trans, bp);
@ -288,7 +288,7 @@ xfs_scrub_dir_rec(
* shortest, and that there aren't any bogus entries. * shortest, and that there aren't any bogus entries.
*/ */
STATIC void STATIC void
xfs_scrub_directory_check_free_entry( xchk_directory_check_free_entry(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_dablk_t lblk, xfs_dablk_t lblk,
struct xfs_dir2_data_free *bf, struct xfs_dir2_data_free *bf,
@ -308,12 +308,12 @@ xfs_scrub_directory_check_free_entry(
return; return;
/* Unused entry should be in the bestfrees but wasn't found. */ /* Unused entry should be in the bestfrees but wasn't found. */
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
} }
/* Check free space info in a directory data block. */ /* Check free space info in a directory data block. */
STATIC int STATIC int
xfs_scrub_directory_data_bestfree( xchk_directory_data_bestfree(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_dablk_t lblk, xfs_dablk_t lblk,
bool is_block) bool is_block)
@ -339,15 +339,15 @@ xfs_scrub_directory_data_bestfree(
if (is_block) { if (is_block) {
/* dir block format */ /* dir block format */
if (lblk != XFS_B_TO_FSBT(mp, XFS_DIR2_DATA_OFFSET)) if (lblk != XFS_B_TO_FSBT(mp, XFS_DIR2_DATA_OFFSET))
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
error = xfs_dir3_block_read(sc->tp, sc->ip, &bp); error = xfs_dir3_block_read(sc->tp, sc->ip, &bp);
} else { } else {
/* dir data format */ /* dir data format */
error = xfs_dir3_data_read(sc->tp, sc->ip, lblk, -1, &bp); error = xfs_dir3_data_read(sc->tp, sc->ip, lblk, -1, &bp);
} }
if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error)) if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
goto out; goto out;
xfs_scrub_buffer_recheck(sc, bp); xchk_buffer_recheck(sc, bp);
/* XXX: Check xfs_dir3_data_hdr.pad is zero once we start setting it. */ /* XXX: Check xfs_dir3_data_hdr.pad is zero once we start setting it. */
@ -362,7 +362,7 @@ xfs_scrub_directory_data_bestfree(
if (offset == 0) if (offset == 0)
continue; continue;
if (offset >= mp->m_dir_geo->blksize) { if (offset >= mp->m_dir_geo->blksize) {
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out_buf; goto out_buf;
} }
dup = (struct xfs_dir2_data_unused *)(bp->b_addr + offset); dup = (struct xfs_dir2_data_unused *)(bp->b_addr + offset);
@ -372,13 +372,13 @@ xfs_scrub_directory_data_bestfree(
if (dup->freetag != cpu_to_be16(XFS_DIR2_DATA_FREE_TAG) || if (dup->freetag != cpu_to_be16(XFS_DIR2_DATA_FREE_TAG) ||
be16_to_cpu(dup->length) != be16_to_cpu(dfp->length) || be16_to_cpu(dup->length) != be16_to_cpu(dfp->length) ||
tag != ((char *)dup - (char *)bp->b_addr)) { tag != ((char *)dup - (char *)bp->b_addr)) {
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out_buf; goto out_buf;
} }
/* bestfree records should be ordered largest to smallest */ /* bestfree records should be ordered largest to smallest */
if (smallest_bestfree < be16_to_cpu(dfp->length)) { if (smallest_bestfree < be16_to_cpu(dfp->length)) {
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out_buf; goto out_buf;
} }
@ -400,7 +400,7 @@ xfs_scrub_directory_data_bestfree(
dep = (struct xfs_dir2_data_entry *)ptr; dep = (struct xfs_dir2_data_entry *)ptr;
newlen = d_ops->data_entsize(dep->namelen); newlen = d_ops->data_entsize(dep->namelen);
if (newlen <= 0) { if (newlen <= 0) {
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
lblk); lblk);
goto out_buf; goto out_buf;
} }
@ -411,7 +411,7 @@ xfs_scrub_directory_data_bestfree(
/* Spot check this free entry */ /* Spot check this free entry */
tag = be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)); tag = be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup));
if (tag != ((char *)dup - (char *)bp->b_addr)) { if (tag != ((char *)dup - (char *)bp->b_addr)) {
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out_buf; goto out_buf;
} }
@ -419,14 +419,14 @@ xfs_scrub_directory_data_bestfree(
* Either this entry is a bestfree or it's smaller than * Either this entry is a bestfree or it's smaller than
* any of the bestfrees. * any of the bestfrees.
*/ */
xfs_scrub_directory_check_free_entry(sc, lblk, bf, dup); xchk_directory_check_free_entry(sc, lblk, bf, dup);
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out_buf; goto out_buf;
/* Move on. */ /* Move on. */
newlen = be16_to_cpu(dup->length); newlen = be16_to_cpu(dup->length);
if (newlen <= 0) { if (newlen <= 0) {
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out_buf; goto out_buf;
} }
ptr += newlen; ptr += newlen;
@ -436,11 +436,11 @@ xfs_scrub_directory_data_bestfree(
/* We're required to fill all the space. */ /* We're required to fill all the space. */
if (ptr != endptr) if (ptr != endptr)
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
/* Did we see at least as many free slots as there are bestfrees? */ /* Did we see at least as many free slots as there are bestfrees? */
if (nr_frees < nr_bestfrees) if (nr_frees < nr_bestfrees)
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
out_buf: out_buf:
xfs_trans_brelse(sc->tp, bp); xfs_trans_brelse(sc->tp, bp);
out: out:
@ -454,7 +454,7 @@ xfs_scrub_directory_data_bestfree(
* array is in order. * array is in order.
*/ */
STATIC void STATIC void
xfs_scrub_directory_check_freesp( xchk_directory_check_freesp(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_dablk_t lblk, xfs_dablk_t lblk,
struct xfs_buf *dbp, struct xfs_buf *dbp,
@ -465,15 +465,15 @@ xfs_scrub_directory_check_freesp(
dfp = sc->ip->d_ops->data_bestfree_p(dbp->b_addr); dfp = sc->ip->d_ops->data_bestfree_p(dbp->b_addr);
if (len != be16_to_cpu(dfp->length)) if (len != be16_to_cpu(dfp->length))
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
if (len > 0 && be16_to_cpu(dfp->offset) == 0) if (len > 0 && be16_to_cpu(dfp->offset) == 0)
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
} }
/* Check free space info in a directory leaf1 block. */ /* Check free space info in a directory leaf1 block. */
STATIC int STATIC int
xfs_scrub_directory_leaf1_bestfree( xchk_directory_leaf1_bestfree(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_da_args *args, struct xfs_da_args *args,
xfs_dablk_t lblk) xfs_dablk_t lblk)
@ -497,9 +497,9 @@ xfs_scrub_directory_leaf1_bestfree(
/* Read the free space block. */ /* Read the free space block. */
error = xfs_dir3_leaf_read(sc->tp, sc->ip, lblk, -1, &bp); error = xfs_dir3_leaf_read(sc->tp, sc->ip, lblk, -1, &bp);
if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error)) if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
goto out; goto out;
xfs_scrub_buffer_recheck(sc, bp); xchk_buffer_recheck(sc, bp);
leaf = bp->b_addr; leaf = bp->b_addr;
d_ops->leaf_hdr_from_disk(&leafhdr, leaf); d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
@ -512,7 +512,7 @@ xfs_scrub_directory_leaf1_bestfree(
struct xfs_dir3_leaf_hdr *hdr3 = bp->b_addr; struct xfs_dir3_leaf_hdr *hdr3 = bp->b_addr;
if (hdr3->pad != cpu_to_be32(0)) if (hdr3->pad != cpu_to_be32(0))
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
} }
/* /*
@ -520,19 +520,19 @@ xfs_scrub_directory_leaf1_bestfree(
* blocks that can fit under i_size. * blocks that can fit under i_size.
*/ */
if (bestcount != xfs_dir2_byte_to_db(geo, sc->ip->i_d.di_size)) { if (bestcount != xfs_dir2_byte_to_db(geo, sc->ip->i_d.di_size)) {
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out; goto out;
} }
/* Is the leaf count even remotely sane? */ /* Is the leaf count even remotely sane? */
if (leafhdr.count > d_ops->leaf_max_ents(geo)) { if (leafhdr.count > d_ops->leaf_max_ents(geo)) {
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out; goto out;
} }
/* Leaves and bests don't overlap in leaf format. */ /* Leaves and bests don't overlap in leaf format. */
if ((char *)&ents[leafhdr.count] > (char *)bestp) { if ((char *)&ents[leafhdr.count] > (char *)bestp) {
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out; goto out;
} }
@ -540,13 +540,13 @@ xfs_scrub_directory_leaf1_bestfree(
for (i = 0; i < leafhdr.count; i++) { for (i = 0; i < leafhdr.count; i++) {
hash = be32_to_cpu(ents[i].hashval); hash = be32_to_cpu(ents[i].hashval);
if (i > 0 && lasthash > hash) if (i > 0 && lasthash > hash)
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
lasthash = hash; lasthash = hash;
if (ents[i].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) if (ents[i].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
stale++; stale++;
} }
if (leafhdr.stale != stale) if (leafhdr.stale != stale)
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out; goto out;
@ -557,10 +557,10 @@ xfs_scrub_directory_leaf1_bestfree(
continue; continue;
error = xfs_dir3_data_read(sc->tp, sc->ip, error = xfs_dir3_data_read(sc->tp, sc->ip,
i * args->geo->fsbcount, -1, &dbp); i * args->geo->fsbcount, -1, &dbp);
if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, lblk, if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk,
&error)) &error))
break; break;
xfs_scrub_directory_check_freesp(sc, lblk, dbp, best); xchk_directory_check_freesp(sc, lblk, dbp, best);
xfs_trans_brelse(sc->tp, dbp); xfs_trans_brelse(sc->tp, dbp);
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out; goto out;
@ -571,7 +571,7 @@ xfs_scrub_directory_leaf1_bestfree(
/* Check free space info in a directory freespace block. */ /* Check free space info in a directory freespace block. */
STATIC int STATIC int
xfs_scrub_directory_free_bestfree( xchk_directory_free_bestfree(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_da_args *args, struct xfs_da_args *args,
xfs_dablk_t lblk) xfs_dablk_t lblk)
@ -587,15 +587,15 @@ xfs_scrub_directory_free_bestfree(
/* Read the free space block */ /* Read the free space block */
error = xfs_dir2_free_read(sc->tp, sc->ip, lblk, &bp); error = xfs_dir2_free_read(sc->tp, sc->ip, lblk, &bp);
if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error)) if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
goto out; goto out;
xfs_scrub_buffer_recheck(sc, bp); xchk_buffer_recheck(sc, bp);
if (xfs_sb_version_hascrc(&sc->mp->m_sb)) { if (xfs_sb_version_hascrc(&sc->mp->m_sb)) {
struct xfs_dir3_free_hdr *hdr3 = bp->b_addr; struct xfs_dir3_free_hdr *hdr3 = bp->b_addr;
if (hdr3->pad != cpu_to_be32(0)) if (hdr3->pad != cpu_to_be32(0))
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
} }
/* Check all the entries. */ /* Check all the entries. */
@ -610,22 +610,22 @@ xfs_scrub_directory_free_bestfree(
error = xfs_dir3_data_read(sc->tp, sc->ip, error = xfs_dir3_data_read(sc->tp, sc->ip,
(freehdr.firstdb + i) * args->geo->fsbcount, (freehdr.firstdb + i) * args->geo->fsbcount,
-1, &dbp); -1, &dbp);
if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, lblk, if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk,
&error)) &error))
break; break;
xfs_scrub_directory_check_freesp(sc, lblk, dbp, best); xchk_directory_check_freesp(sc, lblk, dbp, best);
xfs_trans_brelse(sc->tp, dbp); xfs_trans_brelse(sc->tp, dbp);
} }
if (freehdr.nused + stale != freehdr.nvalid) if (freehdr.nused + stale != freehdr.nvalid)
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
out: out:
return error; return error;
} }
/* Check free space information in directories. */ /* Check free space information in directories. */
STATIC int STATIC int
xfs_scrub_directory_blocks( xchk_directory_blocks(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
struct xfs_bmbt_irec got; struct xfs_bmbt_irec got;
@ -656,7 +656,7 @@ xfs_scrub_directory_blocks(
args.geo = mp->m_dir_geo; args.geo = mp->m_dir_geo;
args.trans = sc->tp; args.trans = sc->tp;
error = xfs_dir2_isblock(&args, &is_block); error = xfs_dir2_isblock(&args, &is_block);
if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error)) if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
goto out; goto out;
/* Iterate all the data extents in the directory... */ /* Iterate all the data extents in the directory... */
@ -666,7 +666,7 @@ xfs_scrub_directory_blocks(
if (is_block && if (is_block &&
(got.br_startoff > 0 || (got.br_startoff > 0 ||
got.br_blockcount != args.geo->fsbcount)) { got.br_blockcount != args.geo->fsbcount)) {
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
got.br_startoff); got.br_startoff);
break; break;
} }
@ -690,7 +690,7 @@ xfs_scrub_directory_blocks(
args.geo->fsbcount); args.geo->fsbcount);
lblk < got.br_startoff + got.br_blockcount; lblk < got.br_startoff + got.br_blockcount;
lblk += args.geo->fsbcount) { lblk += args.geo->fsbcount) {
error = xfs_scrub_directory_data_bestfree(sc, lblk, error = xchk_directory_data_bestfree(sc, lblk,
is_block); is_block);
if (error) if (error)
goto out; goto out;
@ -709,10 +709,10 @@ xfs_scrub_directory_blocks(
got.br_blockcount == args.geo->fsbcount && got.br_blockcount == args.geo->fsbcount &&
!xfs_iext_next_extent(ifp, &icur, &got)) { !xfs_iext_next_extent(ifp, &icur, &got)) {
if (is_block) { if (is_block) {
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out; goto out;
} }
error = xfs_scrub_directory_leaf1_bestfree(sc, &args, error = xchk_directory_leaf1_bestfree(sc, &args,
leaf_lblk); leaf_lblk);
if (error) if (error)
goto out; goto out;
@ -731,11 +731,11 @@ xfs_scrub_directory_blocks(
*/ */
lblk = got.br_startoff; lblk = got.br_startoff;
if (lblk & ~0xFFFFFFFFULL) { if (lblk & ~0xFFFFFFFFULL) {
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out; goto out;
} }
if (is_block) { if (is_block) {
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out; goto out;
} }
@ -754,7 +754,7 @@ xfs_scrub_directory_blocks(
args.geo->fsbcount); args.geo->fsbcount);
lblk < got.br_startoff + got.br_blockcount; lblk < got.br_startoff + got.br_blockcount;
lblk += args.geo->fsbcount) { lblk += args.geo->fsbcount) {
error = xfs_scrub_directory_free_bestfree(sc, &args, error = xchk_directory_free_bestfree(sc, &args,
lblk); lblk);
if (error) if (error)
goto out; goto out;
@ -769,11 +769,11 @@ xfs_scrub_directory_blocks(
/* Scrub a whole directory. */ /* Scrub a whole directory. */
int int
xfs_scrub_directory( xchk_directory(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
struct xfs_scrub_dir_ctx sdc = { struct xchk_dir_ctx sdc = {
.dir_iter.actor = xfs_scrub_dir_actor, .dir_iter.actor = xchk_dir_actor,
.dir_iter.pos = 0, .dir_iter.pos = 0,
.sc = sc, .sc = sc,
}; };
@ -786,12 +786,12 @@ xfs_scrub_directory(
/* Plausible size? */ /* Plausible size? */
if (sc->ip->i_d.di_size < xfs_dir2_sf_hdr_size(0)) { if (sc->ip->i_d.di_size < xfs_dir2_sf_hdr_size(0)) {
xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino); xchk_ino_set_corrupt(sc, sc->ip->i_ino);
goto out; goto out;
} }
/* Check directory tree structure */ /* Check directory tree structure */
error = xfs_scrub_da_btree(sc, XFS_DATA_FORK, xfs_scrub_dir_rec, NULL); error = xchk_da_btree(sc, XFS_DATA_FORK, xchk_dir_rec, NULL);
if (error) if (error)
return error; return error;
@ -799,7 +799,7 @@ xfs_scrub_directory(
return error; return error;
/* Check the freespace. */ /* Check the freespace. */
error = xfs_scrub_directory_blocks(sc); error = xchk_directory_blocks(sc);
if (error) if (error)
return error; return error;
@ -816,7 +816,7 @@ xfs_scrub_directory(
/* /*
* Look up every name in this directory by hash. * Look up every name in this directory by hash.
* *
* Use the xfs_readdir function to call xfs_scrub_dir_actor on * Use the xfs_readdir function to call xchk_dir_actor on
* every directory entry in this directory. In _actor, we check * every directory entry in this directory. In _actor, we check
* the name, inode number, and ftype (if applicable) of the * the name, inode number, and ftype (if applicable) of the
* entry. xfs_readdir uses the VFS filldir functions to provide * entry. xfs_readdir uses the VFS filldir functions to provide
@ -834,7 +834,7 @@ xfs_scrub_directory(
xfs_iunlock(sc->ip, XFS_ILOCK_EXCL); xfs_iunlock(sc->ip, XFS_ILOCK_EXCL);
while (true) { while (true) {
error = xfs_readdir(sc->tp, sc->ip, &sdc.dir_iter, bufsize); error = xfs_readdir(sc->tp, sc->ip, &sdc.dir_iter, bufsize);
if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0,
&error)) &error))
goto out; goto out;
if (oldpos == sdc.dir_iter.pos) if (oldpos == sdc.dir_iter.pos)
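
The hunks above only rename the directory scrub helpers; the check itself is unchanged: every unused region in a directory data block must either appear in the block's bestfree array or be smaller than every recorded bestfree, as the comment near the xchk_directory_check_free_entry() call states. The following is a minimal user-space sketch of that rule; the struct layout, field names, and the strict "smaller than" comparison are illustrative assumptions, not the on-disk format or the kernel helper.

/*
 * Simplified model of the bestfree rule checked by
 * xchk_directory_check_free_entry(): an unused region must either be
 * one of the recorded bestfrees or be smaller than all of them.
 * Hypothetical types; not the XFS on-disk layout.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_BESTFREE 3

struct bestfree {
	unsigned short offset;	/* byte offset of the free region */
	unsigned short length;	/* length of the free region */
};

/* Return true if a free region at (offset, length) is consistent. */
static bool free_entry_ok(const struct bestfree bf[NR_BESTFREE],
			  unsigned short offset, unsigned short length)
{
	unsigned short smallest = (unsigned short)-1;
	int i;

	for (i = 0; i < NR_BESTFREE; i++) {
		/* Track the smallest recorded bestfree length. */
		if (bf[i].length && bf[i].length < smallest)
			smallest = bf[i].length;
		/* An exact match in the bestfree array is always fine. */
		if (bf[i].offset == offset && bf[i].length == length)
			return true;
	}
	/* Otherwise it must be smaller than every recorded bestfree. */
	return length < smallest;
}

int main(void)
{
	struct bestfree bf[NR_BESTFREE] = {
		{ .offset = 64,  .length = 128 },
		{ .offset = 512, .length = 96 },
		{ .offset = 900, .length = 48 },
	};

	printf("%d\n", free_entry_ok(bf, 512, 96));	/* in the array: ok */
	printf("%d\n", free_entry_ok(bf, 300, 32));	/* smaller than all: ok */
	printf("%d\n", free_entry_ok(bf, 300, 200));	/* missing and large: corrupt */
	return 0;
}
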


@ -35,11 +35,11 @@
* try again after forcing logged inode cores out to disk. * try again after forcing logged inode cores out to disk.
*/ */
int int
xfs_scrub_setup_ag_iallocbt( xchk_setup_ag_iallocbt(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_inode *ip) struct xfs_inode *ip)
{ {
return xfs_scrub_setup_ag_btree(sc, ip, sc->try_harder); return xchk_setup_ag_btree(sc, ip, sc->try_harder);
} }
/* Inode btree scrubber. */ /* Inode btree scrubber. */
@ -50,7 +50,7 @@ xfs_scrub_setup_ag_iallocbt(
* we have a record or not depending on freecount. * we have a record or not depending on freecount.
*/ */
static inline void static inline void
xfs_scrub_iallocbt_chunk_xref_other( xchk_iallocbt_chunk_xref_other(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_inobt_rec_incore *irec, struct xfs_inobt_rec_incore *irec,
xfs_agino_t agino) xfs_agino_t agino)
@ -66,16 +66,16 @@ xfs_scrub_iallocbt_chunk_xref_other(
if (!(*pcur)) if (!(*pcur))
return; return;
error = xfs_ialloc_has_inode_record(*pcur, agino, agino, &has_irec); error = xfs_ialloc_has_inode_record(*pcur, agino, agino, &has_irec);
if (!xfs_scrub_should_check_xref(sc, &error, pcur)) if (!xchk_should_check_xref(sc, &error, pcur))
return; return;
if (((irec->ir_freecount > 0 && !has_irec) || if (((irec->ir_freecount > 0 && !has_irec) ||
(irec->ir_freecount == 0 && has_irec))) (irec->ir_freecount == 0 && has_irec)))
xfs_scrub_btree_xref_set_corrupt(sc, *pcur, 0); xchk_btree_xref_set_corrupt(sc, *pcur, 0);
} }
/* Cross-reference with the other btrees. */ /* Cross-reference with the other btrees. */
STATIC void STATIC void
xfs_scrub_iallocbt_chunk_xref( xchk_iallocbt_chunk_xref(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_inobt_rec_incore *irec, struct xfs_inobt_rec_incore *irec,
xfs_agino_t agino, xfs_agino_t agino,
@ -87,17 +87,17 @@ xfs_scrub_iallocbt_chunk_xref(
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return; return;
xfs_scrub_xref_is_used_space(sc, agbno, len); xchk_xref_is_used_space(sc, agbno, len);
xfs_scrub_iallocbt_chunk_xref_other(sc, irec, agino); xchk_iallocbt_chunk_xref_other(sc, irec, agino);
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES); xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
xfs_scrub_xref_is_owned_by(sc, agbno, len, &oinfo); xchk_xref_is_owned_by(sc, agbno, len, &oinfo);
xfs_scrub_xref_is_not_shared(sc, agbno, len); xchk_xref_is_not_shared(sc, agbno, len);
} }
/* Is this chunk worth checking? */ /* Is this chunk worth checking? */
STATIC bool STATIC bool
xfs_scrub_iallocbt_chunk( xchk_iallocbt_chunk(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
struct xfs_inobt_rec_incore *irec, struct xfs_inobt_rec_incore *irec,
xfs_agino_t agino, xfs_agino_t agino,
xfs_extlen_t len) xfs_extlen_t len)
@ -110,16 +110,16 @@ xfs_scrub_iallocbt_chunk(
if (bno + len <= bno || if (bno + len <= bno ||
!xfs_verify_agbno(mp, agno, bno) || !xfs_verify_agbno(mp, agno, bno) ||
!xfs_verify_agbno(mp, agno, bno + len - 1)) !xfs_verify_agbno(mp, agno, bno + len - 1))
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
xfs_scrub_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len); xchk_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len);
return true; return true;
} }
/* Count the number of free inodes. */ /* Count the number of free inodes. */
static unsigned int static unsigned int
xfs_scrub_iallocbt_freecount( xchk_iallocbt_freecount(
xfs_inofree_t freemask) xfs_inofree_t freemask)
{ {
BUILD_BUG_ON(sizeof(freemask) != sizeof(__u64)); BUILD_BUG_ON(sizeof(freemask) != sizeof(__u64));
@ -128,8 +128,8 @@ xfs_scrub_iallocbt_freecount(
/* Check a particular inode with ir_free. */ /* Check a particular inode with ir_free. */
STATIC int STATIC int
xfs_scrub_iallocbt_check_cluster_freemask( xchk_iallocbt_check_cluster_freemask(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
xfs_ino_t fsino, xfs_ino_t fsino,
xfs_agino_t chunkino, xfs_agino_t chunkino,
xfs_agino_t clusterino, xfs_agino_t clusterino,
@ -143,14 +143,14 @@ xfs_scrub_iallocbt_check_cluster_freemask(
bool inuse; bool inuse;
int error = 0; int error = 0;
if (xfs_scrub_should_terminate(bs->sc, &error)) if (xchk_should_terminate(bs->sc, &error))
return error; return error;
dip = xfs_buf_offset(bp, clusterino * mp->m_sb.sb_inodesize); dip = xfs_buf_offset(bp, clusterino * mp->m_sb.sb_inodesize);
if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC || if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
(dip->di_version >= 3 && (dip->di_version >= 3 &&
be64_to_cpu(dip->di_ino) != fsino + clusterino)) { be64_to_cpu(dip->di_ino) != fsino + clusterino)) {
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
goto out; goto out;
} }
@ -175,15 +175,15 @@ xfs_scrub_iallocbt_check_cluster_freemask(
freemask_ok = inode_is_free ^ inuse; freemask_ok = inode_is_free ^ inuse;
} }
if (!freemask_ok) if (!freemask_ok)
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
out: out:
return 0; return 0;
} }
/* Make sure the free mask is consistent with what the inodes think. */ /* Make sure the free mask is consistent with what the inodes think. */
STATIC int STATIC int
xfs_scrub_iallocbt_check_freemask( xchk_iallocbt_check_freemask(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
struct xfs_inobt_rec_incore *irec) struct xfs_inobt_rec_incore *irec)
{ {
struct xfs_owner_info oinfo; struct xfs_owner_info oinfo;
@ -223,18 +223,18 @@ xfs_scrub_iallocbt_check_freemask(
/* The whole cluster must be a hole or not a hole. */ /* The whole cluster must be a hole or not a hole. */
ir_holemask = (irec->ir_holemask & holemask); ir_holemask = (irec->ir_holemask & holemask);
if (ir_holemask != holemask && ir_holemask != 0) { if (ir_holemask != holemask && ir_holemask != 0) {
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
continue; continue;
} }
/* If any part of this is a hole, skip it. */ /* If any part of this is a hole, skip it. */
if (ir_holemask) { if (ir_holemask) {
xfs_scrub_xref_is_not_owned_by(bs->sc, agbno, xchk_xref_is_not_owned_by(bs->sc, agbno,
blks_per_cluster, &oinfo); blks_per_cluster, &oinfo);
continue; continue;
} }
xfs_scrub_xref_is_owned_by(bs->sc, agbno, blks_per_cluster, xchk_xref_is_owned_by(bs->sc, agbno, blks_per_cluster,
&oinfo); &oinfo);
/* Grab the inode cluster buffer. */ /* Grab the inode cluster buffer. */
@ -245,13 +245,13 @@ xfs_scrub_iallocbt_check_freemask(
error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap,
&dip, &bp, 0, 0); &dip, &bp, 0, 0);
if (!xfs_scrub_btree_xref_process_error(bs->sc, bs->cur, 0, if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0,
&error)) &error))
continue; continue;
/* Which inodes are free? */ /* Which inodes are free? */
for (clusterino = 0; clusterino < nr_inodes; clusterino++) { for (clusterino = 0; clusterino < nr_inodes; clusterino++) {
error = xfs_scrub_iallocbt_check_cluster_freemask(bs, error = xchk_iallocbt_check_cluster_freemask(bs,
fsino, chunkino, clusterino, irec, bp); fsino, chunkino, clusterino, irec, bp);
if (error) { if (error) {
xfs_trans_brelse(bs->cur->bc_tp, bp); xfs_trans_brelse(bs->cur->bc_tp, bp);
@ -267,8 +267,8 @@ xfs_scrub_iallocbt_check_freemask(
/* Scrub an inobt/finobt record. */ /* Scrub an inobt/finobt record. */
STATIC int STATIC int
xfs_scrub_iallocbt_rec( xchk_iallocbt_rec(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
union xfs_btree_rec *rec) union xfs_btree_rec *rec)
{ {
struct xfs_mount *mp = bs->cur->bc_mp; struct xfs_mount *mp = bs->cur->bc_mp;
@ -289,18 +289,18 @@ xfs_scrub_iallocbt_rec(
if (irec.ir_count > XFS_INODES_PER_CHUNK || if (irec.ir_count > XFS_INODES_PER_CHUNK ||
irec.ir_freecount > XFS_INODES_PER_CHUNK) irec.ir_freecount > XFS_INODES_PER_CHUNK)
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
real_freecount = irec.ir_freecount + real_freecount = irec.ir_freecount +
(XFS_INODES_PER_CHUNK - irec.ir_count); (XFS_INODES_PER_CHUNK - irec.ir_count);
if (real_freecount != xfs_scrub_iallocbt_freecount(irec.ir_free)) if (real_freecount != xchk_iallocbt_freecount(irec.ir_free))
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
agino = irec.ir_startino; agino = irec.ir_startino;
/* Record has to be properly aligned within the AG. */ /* Record has to be properly aligned within the AG. */
if (!xfs_verify_agino(mp, agno, agino) || if (!xfs_verify_agino(mp, agno, agino) ||
!xfs_verify_agino(mp, agno, agino + XFS_INODES_PER_CHUNK - 1)) { !xfs_verify_agino(mp, agno, agino + XFS_INODES_PER_CHUNK - 1)) {
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
goto out; goto out;
} }
@ -308,7 +308,7 @@ xfs_scrub_iallocbt_rec(
agbno = XFS_AGINO_TO_AGBNO(mp, irec.ir_startino); agbno = XFS_AGINO_TO_AGBNO(mp, irec.ir_startino);
if ((agbno & (xfs_ialloc_cluster_alignment(mp) - 1)) || if ((agbno & (xfs_ialloc_cluster_alignment(mp) - 1)) ||
(agbno & (xfs_icluster_size_fsb(mp) - 1))) (agbno & (xfs_icluster_size_fsb(mp) - 1)))
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
*inode_blocks += XFS_B_TO_FSB(mp, *inode_blocks += XFS_B_TO_FSB(mp,
irec.ir_count * mp->m_sb.sb_inodesize); irec.ir_count * mp->m_sb.sb_inodesize);
@ -318,9 +318,9 @@ xfs_scrub_iallocbt_rec(
len = XFS_B_TO_FSB(mp, len = XFS_B_TO_FSB(mp,
XFS_INODES_PER_CHUNK * mp->m_sb.sb_inodesize); XFS_INODES_PER_CHUNK * mp->m_sb.sb_inodesize);
if (irec.ir_count != XFS_INODES_PER_CHUNK) if (irec.ir_count != XFS_INODES_PER_CHUNK)
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
if (!xfs_scrub_iallocbt_chunk(bs, &irec, agino, len)) if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
goto out; goto out;
goto check_freemask; goto check_freemask;
} }
@ -333,12 +333,12 @@ xfs_scrub_iallocbt_rec(
holes = ~xfs_inobt_irec_to_allocmask(&irec); holes = ~xfs_inobt_irec_to_allocmask(&irec);
if ((holes & irec.ir_free) != holes || if ((holes & irec.ir_free) != holes ||
irec.ir_freecount > irec.ir_count) irec.ir_freecount > irec.ir_count)
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; i++) { for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; i++) {
if (holemask & 1) if (holemask & 1)
holecount += XFS_INODES_PER_HOLEMASK_BIT; holecount += XFS_INODES_PER_HOLEMASK_BIT;
else if (!xfs_scrub_iallocbt_chunk(bs, &irec, agino, len)) else if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
break; break;
holemask >>= 1; holemask >>= 1;
agino += XFS_INODES_PER_HOLEMASK_BIT; agino += XFS_INODES_PER_HOLEMASK_BIT;
@ -346,10 +346,10 @@ xfs_scrub_iallocbt_rec(
if (holecount > XFS_INODES_PER_CHUNK || if (holecount > XFS_INODES_PER_CHUNK ||
holecount + irec.ir_count != XFS_INODES_PER_CHUNK) holecount + irec.ir_count != XFS_INODES_PER_CHUNK)
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
check_freemask: check_freemask:
error = xfs_scrub_iallocbt_check_freemask(bs, &irec); error = xchk_iallocbt_check_freemask(bs, &irec);
if (error) if (error)
goto out; goto out;
@ -362,7 +362,7 @@ xfs_scrub_iallocbt_rec(
* Don't bother if we're missing btree cursors, as we're already corrupt. * Don't bother if we're missing btree cursors, as we're already corrupt.
*/ */
STATIC void STATIC void
xfs_scrub_iallocbt_xref_rmap_btreeblks( xchk_iallocbt_xref_rmap_btreeblks(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
int which) int which)
{ {
@ -374,27 +374,27 @@ xfs_scrub_iallocbt_xref_rmap_btreeblks(
if (!sc->sa.ino_cur || !sc->sa.rmap_cur || if (!sc->sa.ino_cur || !sc->sa.rmap_cur ||
(xfs_sb_version_hasfinobt(&sc->mp->m_sb) && !sc->sa.fino_cur) || (xfs_sb_version_hasfinobt(&sc->mp->m_sb) && !sc->sa.fino_cur) ||
xfs_scrub_skip_xref(sc->sm)) xchk_skip_xref(sc->sm))
return; return;
/* Check that we saw as many inobt blocks as the rmap says. */ /* Check that we saw as many inobt blocks as the rmap says. */
error = xfs_btree_count_blocks(sc->sa.ino_cur, &inobt_blocks); error = xfs_btree_count_blocks(sc->sa.ino_cur, &inobt_blocks);
if (!xfs_scrub_process_error(sc, 0, 0, &error)) if (!xchk_process_error(sc, 0, 0, &error))
return; return;
if (sc->sa.fino_cur) { if (sc->sa.fino_cur) {
error = xfs_btree_count_blocks(sc->sa.fino_cur, &finobt_blocks); error = xfs_btree_count_blocks(sc->sa.fino_cur, &finobt_blocks);
if (!xfs_scrub_process_error(sc, 0, 0, &error)) if (!xchk_process_error(sc, 0, 0, &error))
return; return;
} }
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT); xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo, error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo,
&blocks); &blocks);
if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur)) if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
return; return;
if (blocks != inobt_blocks + finobt_blocks) if (blocks != inobt_blocks + finobt_blocks)
xfs_scrub_btree_set_corrupt(sc, sc->sa.ino_cur, 0); xchk_btree_set_corrupt(sc, sc->sa.ino_cur, 0);
} }
/* /*
@ -402,7 +402,7 @@ xfs_scrub_iallocbt_xref_rmap_btreeblks(
* the rmap says are owned by inodes. * the rmap says are owned by inodes.
*/ */
STATIC void STATIC void
xfs_scrub_iallocbt_xref_rmap_inodes( xchk_iallocbt_xref_rmap_inodes(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
int which, int which,
xfs_filblks_t inode_blocks) xfs_filblks_t inode_blocks)
@ -411,22 +411,22 @@ xfs_scrub_iallocbt_xref_rmap_inodes(
xfs_filblks_t blocks; xfs_filblks_t blocks;
int error; int error;
if (!sc->sa.rmap_cur || xfs_scrub_skip_xref(sc->sm)) if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
return; return;
/* Check that we saw as many inode blocks as the rmap knows about. */ /* Check that we saw as many inode blocks as the rmap knows about. */
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES); xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo, error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo,
&blocks); &blocks);
if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur)) if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
return; return;
if (blocks != inode_blocks) if (blocks != inode_blocks)
xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0); xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
} }
/* Scrub the inode btrees for some AG. */ /* Scrub the inode btrees for some AG. */
STATIC int STATIC int
xfs_scrub_iallocbt( xchk_iallocbt(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_btnum_t which) xfs_btnum_t which)
{ {
@ -437,12 +437,12 @@ xfs_scrub_iallocbt(
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT); xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
cur = which == XFS_BTNUM_INO ? sc->sa.ino_cur : sc->sa.fino_cur; cur = which == XFS_BTNUM_INO ? sc->sa.ino_cur : sc->sa.fino_cur;
error = xfs_scrub_btree(sc, cur, xfs_scrub_iallocbt_rec, &oinfo, error = xchk_btree(sc, cur, xchk_iallocbt_rec, &oinfo,
&inode_blocks); &inode_blocks);
if (error) if (error)
return error; return error;
xfs_scrub_iallocbt_xref_rmap_btreeblks(sc, which); xchk_iallocbt_xref_rmap_btreeblks(sc, which);
/* /*
* If we're scrubbing the inode btree, inode_blocks is the number of * If we're scrubbing the inode btree, inode_blocks is the number of
@ -452,28 +452,28 @@ xfs_scrub_iallocbt(
* to inode chunks with free inodes. * to inode chunks with free inodes.
*/ */
if (which == XFS_BTNUM_INO) if (which == XFS_BTNUM_INO)
xfs_scrub_iallocbt_xref_rmap_inodes(sc, which, inode_blocks); xchk_iallocbt_xref_rmap_inodes(sc, which, inode_blocks);
return error; return error;
} }
int int
xfs_scrub_inobt( xchk_inobt(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
return xfs_scrub_iallocbt(sc, XFS_BTNUM_INO); return xchk_iallocbt(sc, XFS_BTNUM_INO);
} }
int int
xfs_scrub_finobt( xchk_finobt(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
return xfs_scrub_iallocbt(sc, XFS_BTNUM_FINO); return xchk_iallocbt(sc, XFS_BTNUM_FINO);
} }
/* See if an inode btree has (or doesn't have) an inode chunk record. */ /* See if an inode btree has (or doesn't have) an inode chunk record. */
static inline void static inline void
xfs_scrub_xref_inode_check( xchk_xref_inode_check(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_agblock_t agbno, xfs_agblock_t agbno,
xfs_extlen_t len, xfs_extlen_t len,
@ -483,33 +483,33 @@ xfs_scrub_xref_inode_check(
bool has_inodes; bool has_inodes;
int error; int error;
if (!(*icur) || xfs_scrub_skip_xref(sc->sm)) if (!(*icur) || xchk_skip_xref(sc->sm))
return; return;
error = xfs_ialloc_has_inodes_at_extent(*icur, agbno, len, &has_inodes); error = xfs_ialloc_has_inodes_at_extent(*icur, agbno, len, &has_inodes);
if (!xfs_scrub_should_check_xref(sc, &error, icur)) if (!xchk_should_check_xref(sc, &error, icur))
return; return;
if (has_inodes != should_have_inodes) if (has_inodes != should_have_inodes)
xfs_scrub_btree_xref_set_corrupt(sc, *icur, 0); xchk_btree_xref_set_corrupt(sc, *icur, 0);
} }
/* xref check that the extent is not covered by inodes */ /* xref check that the extent is not covered by inodes */
void void
xfs_scrub_xref_is_not_inode_chunk( xchk_xref_is_not_inode_chunk(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_agblock_t agbno, xfs_agblock_t agbno,
xfs_extlen_t len) xfs_extlen_t len)
{ {
xfs_scrub_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, false); xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, false);
xfs_scrub_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur, false); xchk_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur, false);
} }
/* xref check that the extent is covered by inodes */ /* xref check that the extent is covered by inodes */
void void
xfs_scrub_xref_is_inode_chunk( xchk_xref_is_inode_chunk(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_agblock_t agbno, xfs_agblock_t agbno,
xfs_extlen_t len) xfs_extlen_t len)
{ {
xfs_scrub_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, true); xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, true);
} }
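
For reference, the freecount consistency test applied in xchk_iallocbt_rec() above (real_freecount = ir_freecount + (XFS_INODES_PER_CHUNK - ir_count), compared against xchk_iallocbt_freecount(ir_free)) can be modelled in a few lines of user-space C. This sketch assumes the free mask is a 64-bit bitmap with one bit per inode in the chunk, consistent with the BUILD_BUG_ON in the hunk above, and that the freecount helper amounts to a population count; no kernel APIs are used.

/*
 * User-space sketch of the inobt record freecount check.  Stand-in
 * names; only the arithmetic is taken from the hunk above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INODES_PER_CHUNK 64

/* Stand-in for xchk_iallocbt_freecount(): count set bits in the mask. */
static unsigned int freecount(uint64_t freemask)
{
	return (unsigned int)__builtin_popcountll(freemask);
}

/* Check that ir_count, ir_freecount and ir_free agree with each other. */
static bool record_freecount_ok(unsigned int ir_count,
				unsigned int ir_freecount,
				uint64_t ir_free)
{
	/* Sparse chunks: inodes beyond ir_count also count as free. */
	unsigned int real_freecount =
		ir_freecount + (INODES_PER_CHUNK - ir_count);

	return real_freecount == freecount(ir_free);
}

int main(void)
{
	/* Full chunk, 4 free inodes in the low bits of the mask. */
	printf("%d\n", record_freecount_ok(64, 4, 0xfULL));
	/* Mismatched mask: scrub would flag the btree record as corrupt. */
	printf("%d\n", record_freecount_ok(64, 4, 0x3ULL));
	return 0;
}
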


@ -37,7 +37,7 @@
* the goal. * the goal.
*/ */
int int
xfs_scrub_setup_inode( xchk_setup_inode(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_inode *ip) struct xfs_inode *ip)
{ {
@ -47,13 +47,13 @@ xfs_scrub_setup_inode(
* Try to get the inode. If the verifiers fail, we try again * Try to get the inode. If the verifiers fail, we try again
* in raw mode. * in raw mode.
*/ */
error = xfs_scrub_get_inode(sc, ip); error = xchk_get_inode(sc, ip);
switch (error) { switch (error) {
case 0: case 0:
break; break;
case -EFSCORRUPTED: case -EFSCORRUPTED:
case -EFSBADCRC: case -EFSBADCRC:
return xfs_scrub_trans_alloc(sc, 0); return xchk_trans_alloc(sc, 0);
default: default:
return error; return error;
} }
@ -61,7 +61,7 @@ xfs_scrub_setup_inode(
/* Got the inode, lock it and we're ready to go. */ /* Got the inode, lock it and we're ready to go. */
sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL; sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
xfs_ilock(sc->ip, sc->ilock_flags); xfs_ilock(sc->ip, sc->ilock_flags);
error = xfs_scrub_trans_alloc(sc, 0); error = xchk_trans_alloc(sc, 0);
if (error) if (error)
goto out; goto out;
sc->ilock_flags |= XFS_ILOCK_EXCL; sc->ilock_flags |= XFS_ILOCK_EXCL;
@ -76,7 +76,7 @@ xfs_scrub_setup_inode(
/* Validate di_extsize hint. */ /* Validate di_extsize hint. */
STATIC void STATIC void
xfs_scrub_inode_extsize( xchk_inode_extsize(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_dinode *dip, struct xfs_dinode *dip,
xfs_ino_t ino, xfs_ino_t ino,
@ -88,7 +88,7 @@ xfs_scrub_inode_extsize(
fa = xfs_inode_validate_extsize(sc->mp, be32_to_cpu(dip->di_extsize), fa = xfs_inode_validate_extsize(sc->mp, be32_to_cpu(dip->di_extsize),
mode, flags); mode, flags);
if (fa) if (fa)
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
} }
/* /*
@ -98,7 +98,7 @@ xfs_scrub_inode_extsize(
* These functions must be kept in sync with each other. * These functions must be kept in sync with each other.
*/ */
STATIC void STATIC void
xfs_scrub_inode_cowextsize( xchk_inode_cowextsize(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_dinode *dip, struct xfs_dinode *dip,
xfs_ino_t ino, xfs_ino_t ino,
@ -112,12 +112,12 @@ xfs_scrub_inode_cowextsize(
be32_to_cpu(dip->di_cowextsize), mode, flags, be32_to_cpu(dip->di_cowextsize), mode, flags,
flags2); flags2);
if (fa) if (fa)
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
} }
/* Make sure the di_flags make sense for the inode. */ /* Make sure the di_flags make sense for the inode. */
STATIC void STATIC void
xfs_scrub_inode_flags( xchk_inode_flags(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_dinode *dip, struct xfs_dinode *dip,
xfs_ino_t ino, xfs_ino_t ino,
@ -157,12 +157,12 @@ xfs_scrub_inode_flags(
return; return;
bad: bad:
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
} }
/* Make sure the di_flags2 make sense for the inode. */ /* Make sure the di_flags2 make sense for the inode. */
STATIC void STATIC void
xfs_scrub_inode_flags2( xchk_inode_flags2(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_dinode *dip, struct xfs_dinode *dip,
xfs_ino_t ino, xfs_ino_t ino,
@ -200,12 +200,12 @@ xfs_scrub_inode_flags2(
return; return;
bad: bad:
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
} }
/* Scrub all the ondisk inode fields. */ /* Scrub all the ondisk inode fields. */
STATIC void STATIC void
xfs_scrub_dinode( xchk_dinode(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_dinode *dip, struct xfs_dinode *dip,
xfs_ino_t ino) xfs_ino_t ino)
@ -237,7 +237,7 @@ xfs_scrub_dinode(
/* mode is recognized */ /* mode is recognized */
break; break;
default: default:
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
break; break;
} }
@ -248,22 +248,22 @@ xfs_scrub_dinode(
* We autoconvert v1 inodes into v2 inodes on writeout, * We autoconvert v1 inodes into v2 inodes on writeout,
* so just mark this inode for preening. * so just mark this inode for preening.
*/ */
xfs_scrub_ino_set_preen(sc, ino); xchk_ino_set_preen(sc, ino);
break; break;
case 2: case 2:
case 3: case 3:
if (dip->di_onlink != 0) if (dip->di_onlink != 0)
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
if (dip->di_mode == 0 && sc->ip) if (dip->di_mode == 0 && sc->ip)
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
if (dip->di_projid_hi != 0 && if (dip->di_projid_hi != 0 &&
!xfs_sb_version_hasprojid32bit(&mp->m_sb)) !xfs_sb_version_hasprojid32bit(&mp->m_sb))
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
break; break;
default: default:
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
return; return;
} }
@ -273,40 +273,40 @@ xfs_scrub_dinode(
*/ */
if (dip->di_uid == cpu_to_be32(-1U) || if (dip->di_uid == cpu_to_be32(-1U) ||
dip->di_gid == cpu_to_be32(-1U)) dip->di_gid == cpu_to_be32(-1U))
xfs_scrub_ino_set_warning(sc, ino); xchk_ino_set_warning(sc, ino);
/* di_format */ /* di_format */
switch (dip->di_format) { switch (dip->di_format) {
case XFS_DINODE_FMT_DEV: case XFS_DINODE_FMT_DEV:
if (!S_ISCHR(mode) && !S_ISBLK(mode) && if (!S_ISCHR(mode) && !S_ISBLK(mode) &&
!S_ISFIFO(mode) && !S_ISSOCK(mode)) !S_ISFIFO(mode) && !S_ISSOCK(mode))
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
break; break;
case XFS_DINODE_FMT_LOCAL: case XFS_DINODE_FMT_LOCAL:
if (!S_ISDIR(mode) && !S_ISLNK(mode)) if (!S_ISDIR(mode) && !S_ISLNK(mode))
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
break; break;
case XFS_DINODE_FMT_EXTENTS: case XFS_DINODE_FMT_EXTENTS:
if (!S_ISREG(mode) && !S_ISDIR(mode) && !S_ISLNK(mode)) if (!S_ISREG(mode) && !S_ISDIR(mode) && !S_ISLNK(mode))
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
break; break;
case XFS_DINODE_FMT_BTREE: case XFS_DINODE_FMT_BTREE:
if (!S_ISREG(mode) && !S_ISDIR(mode)) if (!S_ISREG(mode) && !S_ISDIR(mode))
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
break; break;
case XFS_DINODE_FMT_UUID: case XFS_DINODE_FMT_UUID:
default: default:
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
break; break;
} }
/* di_[amc]time.nsec */ /* di_[amc]time.nsec */
if (be32_to_cpu(dip->di_atime.t_nsec) >= NSEC_PER_SEC) if (be32_to_cpu(dip->di_atime.t_nsec) >= NSEC_PER_SEC)
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
if (be32_to_cpu(dip->di_mtime.t_nsec) >= NSEC_PER_SEC) if (be32_to_cpu(dip->di_mtime.t_nsec) >= NSEC_PER_SEC)
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
if (be32_to_cpu(dip->di_ctime.t_nsec) >= NSEC_PER_SEC) if (be32_to_cpu(dip->di_ctime.t_nsec) >= NSEC_PER_SEC)
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
/* /*
* di_size. xfs_dinode_verify checks for things that screw up * di_size. xfs_dinode_verify checks for things that screw up
@ -315,19 +315,19 @@ xfs_scrub_dinode(
*/ */
isize = be64_to_cpu(dip->di_size); isize = be64_to_cpu(dip->di_size);
if (isize & (1ULL << 63)) if (isize & (1ULL << 63))
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
/* Devices, fifos, and sockets must have zero size */ /* Devices, fifos, and sockets must have zero size */
if (!S_ISDIR(mode) && !S_ISREG(mode) && !S_ISLNK(mode) && isize != 0) if (!S_ISDIR(mode) && !S_ISREG(mode) && !S_ISLNK(mode) && isize != 0)
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
/* Directories can't be larger than the data section size (32G) */ /* Directories can't be larger than the data section size (32G) */
if (S_ISDIR(mode) && (isize == 0 || isize >= XFS_DIR2_SPACE_SIZE)) if (S_ISDIR(mode) && (isize == 0 || isize >= XFS_DIR2_SPACE_SIZE))
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
/* Symlinks can't be larger than SYMLINK_MAXLEN */ /* Symlinks can't be larger than SYMLINK_MAXLEN */
if (S_ISLNK(mode) && (isize == 0 || isize >= XFS_SYMLINK_MAXLEN)) if (S_ISLNK(mode) && (isize == 0 || isize >= XFS_SYMLINK_MAXLEN))
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
/* /*
* Warn if the running kernel can't handle the kinds of offsets * Warn if the running kernel can't handle the kinds of offsets
@ -336,7 +336,7 @@ xfs_scrub_dinode(
* overly large offsets, flag the inode for admin review. * overly large offsets, flag the inode for admin review.
*/ */
if (isize >= mp->m_super->s_maxbytes) if (isize >= mp->m_super->s_maxbytes)
xfs_scrub_ino_set_warning(sc, ino); xchk_ino_set_warning(sc, ino);
/* di_nblocks */ /* di_nblocks */
if (flags2 & XFS_DIFLAG2_REFLINK) { if (flags2 & XFS_DIFLAG2_REFLINK) {
@ -351,15 +351,15 @@ xfs_scrub_dinode(
*/ */
if (be64_to_cpu(dip->di_nblocks) >= if (be64_to_cpu(dip->di_nblocks) >=
mp->m_sb.sb_dblocks + mp->m_sb.sb_rblocks) mp->m_sb.sb_dblocks + mp->m_sb.sb_rblocks)
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
} else { } else {
if (be64_to_cpu(dip->di_nblocks) >= mp->m_sb.sb_dblocks) if (be64_to_cpu(dip->di_nblocks) >= mp->m_sb.sb_dblocks)
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
} }
xfs_scrub_inode_flags(sc, dip, ino, mode, flags); xchk_inode_flags(sc, dip, ino, mode, flags);
xfs_scrub_inode_extsize(sc, dip, ino, mode, flags); xchk_inode_extsize(sc, dip, ino, mode, flags);
/* di_nextents */ /* di_nextents */
nextents = be32_to_cpu(dip->di_nextents); nextents = be32_to_cpu(dip->di_nextents);
@ -367,31 +367,31 @@ xfs_scrub_dinode(
switch (dip->di_format) { switch (dip->di_format) {
case XFS_DINODE_FMT_EXTENTS: case XFS_DINODE_FMT_EXTENTS:
if (nextents > fork_recs) if (nextents > fork_recs)
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
break; break;
case XFS_DINODE_FMT_BTREE: case XFS_DINODE_FMT_BTREE:
if (nextents <= fork_recs) if (nextents <= fork_recs)
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
break; break;
default: default:
if (nextents != 0) if (nextents != 0)
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
break; break;
} }
/* di_forkoff */ /* di_forkoff */
if (XFS_DFORK_APTR(dip) >= (char *)dip + mp->m_sb.sb_inodesize) if (XFS_DFORK_APTR(dip) >= (char *)dip + mp->m_sb.sb_inodesize)
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
if (dip->di_anextents != 0 && dip->di_forkoff == 0) if (dip->di_anextents != 0 && dip->di_forkoff == 0)
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
if (dip->di_forkoff == 0 && dip->di_aformat != XFS_DINODE_FMT_EXTENTS) if (dip->di_forkoff == 0 && dip->di_aformat != XFS_DINODE_FMT_EXTENTS)
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
/* di_aformat */ /* di_aformat */
if (dip->di_aformat != XFS_DINODE_FMT_LOCAL && if (dip->di_aformat != XFS_DINODE_FMT_LOCAL &&
dip->di_aformat != XFS_DINODE_FMT_EXTENTS && dip->di_aformat != XFS_DINODE_FMT_EXTENTS &&
dip->di_aformat != XFS_DINODE_FMT_BTREE) dip->di_aformat != XFS_DINODE_FMT_BTREE)
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
/* di_anextents */ /* di_anextents */
nextents = be16_to_cpu(dip->di_anextents); nextents = be16_to_cpu(dip->di_anextents);
@ -399,22 +399,22 @@ xfs_scrub_dinode(
switch (dip->di_aformat) { switch (dip->di_aformat) {
case XFS_DINODE_FMT_EXTENTS: case XFS_DINODE_FMT_EXTENTS:
if (nextents > fork_recs) if (nextents > fork_recs)
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
break; break;
case XFS_DINODE_FMT_BTREE: case XFS_DINODE_FMT_BTREE:
if (nextents <= fork_recs) if (nextents <= fork_recs)
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
break; break;
default: default:
if (nextents != 0) if (nextents != 0)
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
} }
if (dip->di_version >= 3) { if (dip->di_version >= 3) {
if (be32_to_cpu(dip->di_crtime.t_nsec) >= NSEC_PER_SEC) if (be32_to_cpu(dip->di_crtime.t_nsec) >= NSEC_PER_SEC)
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
xfs_scrub_inode_flags2(sc, dip, ino, mode, flags, flags2); xchk_inode_flags2(sc, dip, ino, mode, flags, flags2);
xfs_scrub_inode_cowextsize(sc, dip, ino, mode, flags, xchk_inode_cowextsize(sc, dip, ino, mode, flags,
flags2); flags2);
} }
} }
@ -425,7 +425,7 @@ xfs_scrub_dinode(
* IGET_UNTRUSTED, which checks the inobt for us. * IGET_UNTRUSTED, which checks the inobt for us.
*/ */
static void static void
xfs_scrub_inode_xref_finobt( xchk_inode_xref_finobt(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_ino_t ino) xfs_ino_t ino)
{ {
@ -434,7 +434,7 @@ xfs_scrub_inode_xref_finobt(
int has_record; int has_record;
int error; int error;
if (!sc->sa.fino_cur || xfs_scrub_skip_xref(sc->sm)) if (!sc->sa.fino_cur || xchk_skip_xref(sc->sm))
return; return;
agino = XFS_INO_TO_AGINO(sc->mp, ino); agino = XFS_INO_TO_AGINO(sc->mp, ino);
@ -445,12 +445,12 @@ xfs_scrub_inode_xref_finobt(
*/ */
error = xfs_inobt_lookup(sc->sa.fino_cur, agino, XFS_LOOKUP_LE, error = xfs_inobt_lookup(sc->sa.fino_cur, agino, XFS_LOOKUP_LE,
&has_record); &has_record);
if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.fino_cur) || if (!xchk_should_check_xref(sc, &error, &sc->sa.fino_cur) ||
!has_record) !has_record)
return; return;
error = xfs_inobt_get_rec(sc->sa.fino_cur, &rec, &has_record); error = xfs_inobt_get_rec(sc->sa.fino_cur, &rec, &has_record);
if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.fino_cur) || if (!xchk_should_check_xref(sc, &error, &sc->sa.fino_cur) ||
!has_record) !has_record)
return; return;
@ -463,12 +463,12 @@ xfs_scrub_inode_xref_finobt(
return; return;
if (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino)) if (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino))
xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.fino_cur, 0); xchk_btree_xref_set_corrupt(sc, sc->sa.fino_cur, 0);
} }
/* Cross reference the inode fields with the forks. */ /* Cross reference the inode fields with the forks. */
STATIC void STATIC void
xfs_scrub_inode_xref_bmap( xchk_inode_xref_bmap(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_dinode *dip) struct xfs_dinode *dip)
{ {
@ -477,32 +477,32 @@ xfs_scrub_inode_xref_bmap(
xfs_filblks_t acount; xfs_filblks_t acount;
int error; int error;
if (xfs_scrub_skip_xref(sc->sm)) if (xchk_skip_xref(sc->sm))
return; return;
/* Walk all the extents to check nextents/naextents/nblocks. */ /* Walk all the extents to check nextents/naextents/nblocks. */
error = xfs_bmap_count_blocks(sc->tp, sc->ip, XFS_DATA_FORK, error = xfs_bmap_count_blocks(sc->tp, sc->ip, XFS_DATA_FORK,
&nextents, &count); &nextents, &count);
if (!xfs_scrub_should_check_xref(sc, &error, NULL)) if (!xchk_should_check_xref(sc, &error, NULL))
return; return;
if (nextents < be32_to_cpu(dip->di_nextents)) if (nextents < be32_to_cpu(dip->di_nextents))
xfs_scrub_ino_xref_set_corrupt(sc, sc->ip->i_ino); xchk_ino_xref_set_corrupt(sc, sc->ip->i_ino);
error = xfs_bmap_count_blocks(sc->tp, sc->ip, XFS_ATTR_FORK, error = xfs_bmap_count_blocks(sc->tp, sc->ip, XFS_ATTR_FORK,
&nextents, &acount); &nextents, &acount);
if (!xfs_scrub_should_check_xref(sc, &error, NULL)) if (!xchk_should_check_xref(sc, &error, NULL))
return; return;
if (nextents != be16_to_cpu(dip->di_anextents)) if (nextents != be16_to_cpu(dip->di_anextents))
xfs_scrub_ino_xref_set_corrupt(sc, sc->ip->i_ino); xchk_ino_xref_set_corrupt(sc, sc->ip->i_ino);
/* Check nblocks against the inode. */ /* Check nblocks against the inode. */
if (count + acount != be64_to_cpu(dip->di_nblocks)) if (count + acount != be64_to_cpu(dip->di_nblocks))
xfs_scrub_ino_xref_set_corrupt(sc, sc->ip->i_ino); xchk_ino_xref_set_corrupt(sc, sc->ip->i_ino);
} }
/* Cross-reference with the other btrees. */ /* Cross-reference with the other btrees. */
STATIC void STATIC void
xfs_scrub_inode_xref( xchk_inode_xref(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_ino_t ino, xfs_ino_t ino,
struct xfs_dinode *dip) struct xfs_dinode *dip)
@ -518,18 +518,18 @@ xfs_scrub_inode_xref(
agno = XFS_INO_TO_AGNO(sc->mp, ino); agno = XFS_INO_TO_AGNO(sc->mp, ino);
agbno = XFS_INO_TO_AGBNO(sc->mp, ino); agbno = XFS_INO_TO_AGBNO(sc->mp, ino);
error = xfs_scrub_ag_init(sc, agno, &sc->sa); error = xchk_ag_init(sc, agno, &sc->sa);
if (!xfs_scrub_xref_process_error(sc, agno, agbno, &error)) if (!xchk_xref_process_error(sc, agno, agbno, &error))
return; return;
xfs_scrub_xref_is_used_space(sc, agbno, 1); xchk_xref_is_used_space(sc, agbno, 1);
xfs_scrub_inode_xref_finobt(sc, ino); xchk_inode_xref_finobt(sc, ino);
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES); xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo); xchk_xref_is_owned_by(sc, agbno, 1, &oinfo);
xfs_scrub_xref_is_not_shared(sc, agbno, 1); xchk_xref_is_not_shared(sc, agbno, 1);
xfs_scrub_inode_xref_bmap(sc, dip); xchk_inode_xref_bmap(sc, dip);
xfs_scrub_ag_free(sc, &sc->sa); xchk_ag_free(sc, &sc->sa);
} }
/* /*
@ -539,7 +539,7 @@ xfs_scrub_inode_xref(
* reflink filesystem. * reflink filesystem.
*/ */
static void static void
xfs_scrub_inode_check_reflink_iflag( xchk_inode_check_reflink_iflag(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_ino_t ino) xfs_ino_t ino)
{ {
@ -552,18 +552,18 @@ xfs_scrub_inode_check_reflink_iflag(
error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip, error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip,
&has_shared); &has_shared);
if (!xfs_scrub_xref_process_error(sc, XFS_INO_TO_AGNO(mp, ino), if (!xchk_xref_process_error(sc, XFS_INO_TO_AGNO(mp, ino),
XFS_INO_TO_AGBNO(mp, ino), &error)) XFS_INO_TO_AGBNO(mp, ino), &error))
return; return;
if (xfs_is_reflink_inode(sc->ip) && !has_shared) if (xfs_is_reflink_inode(sc->ip) && !has_shared)
xfs_scrub_ino_set_preen(sc, ino); xchk_ino_set_preen(sc, ino);
else if (!xfs_is_reflink_inode(sc->ip) && has_shared) else if (!xfs_is_reflink_inode(sc->ip) && has_shared)
xfs_scrub_ino_set_corrupt(sc, ino); xchk_ino_set_corrupt(sc, ino);
} }
/* Scrub an inode. */ /* Scrub an inode. */
int int
xfs_scrub_inode( xchk_inode(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
struct xfs_dinode di; struct xfs_dinode di;
@ -575,13 +575,13 @@ xfs_scrub_inode(
* and a NULL inode, so flag the corruption error and return. * and a NULL inode, so flag the corruption error and return.
*/ */
if (!sc->ip) { if (!sc->ip) {
xfs_scrub_ino_set_corrupt(sc, sc->sm->sm_ino); xchk_ino_set_corrupt(sc, sc->sm->sm_ino);
return 0; return 0;
} }
/* Scrub the inode core. */ /* Scrub the inode core. */
xfs_inode_to_disk(sc->ip, &di, 0); xfs_inode_to_disk(sc->ip, &di, 0);
xfs_scrub_dinode(sc, &di, sc->ip->i_ino); xchk_dinode(sc, &di, sc->ip->i_ino);
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out; goto out;
@ -591,9 +591,9 @@ xfs_scrub_inode(
* we scrubbed the dinode. * we scrubbed the dinode.
*/ */
if (S_ISREG(VFS_I(sc->ip)->i_mode)) if (S_ISREG(VFS_I(sc->ip)->i_mode))
xfs_scrub_inode_check_reflink_iflag(sc, sc->ip->i_ino); xchk_inode_check_reflink_iflag(sc, sc->ip->i_ino);
xfs_scrub_inode_xref(sc, sc->ip->i_ino, &di); xchk_inode_xref(sc, sc->ip->i_ino, &di);
out: out:
return error; return error;
} }
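
The mode/di_format pairing rules enforced in xchk_dinode() above (dev format only for special files, local format only for directories and symlinks, and so on) are easy to restate outside the kernel. Below is a self-contained sketch of just that switch; the enum values are local stand-ins rather than the real XFS_DINODE_FMT_* constants, and only the pairing rules visible in the hunks are reproduced.

/*
 * Sketch of the data fork format vs. file mode consistency check.
 * Hypothetical enum; the rules mirror the switch in xchk_dinode().
 */
#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>

enum dinode_fmt {	/* local stand-ins for XFS_DINODE_FMT_* */
	FMT_DEV,
	FMT_LOCAL,
	FMT_EXTENTS,
	FMT_BTREE,
};

/* Return true if the data fork format is legal for this file mode. */
static bool fork_format_ok(mode_t mode, enum dinode_fmt fmt)
{
	switch (fmt) {
	case FMT_DEV:		/* only special files may use dev format */
		return S_ISCHR(mode) || S_ISBLK(mode) ||
		       S_ISFIFO(mode) || S_ISSOCK(mode);
	case FMT_LOCAL:		/* short-form data: directories and symlinks */
		return S_ISDIR(mode) || S_ISLNK(mode);
	case FMT_EXTENTS:	/* extent lists: regular files, dirs, symlinks */
		return S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode);
	case FMT_BTREE:		/* btree-format forks: regular files and dirs */
		return S_ISREG(mode) || S_ISDIR(mode);
	}
	return false;
}

int main(void)
{
	printf("%d\n", fork_format_ok(S_IFREG, FMT_EXTENTS));	/* ok */
	printf("%d\n", fork_format_ok(S_IFCHR, FMT_BTREE));	/* corrupt */
	return 0;
}
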


@ -27,18 +27,18 @@
/* Set us up to scrub parents. */ /* Set us up to scrub parents. */
int int
xfs_scrub_setup_parent( xchk_setup_parent(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_inode *ip) struct xfs_inode *ip)
{ {
return xfs_scrub_setup_inode_contents(sc, ip, 0); return xchk_setup_inode_contents(sc, ip, 0);
} }
/* Parent pointers */ /* Parent pointers */
/* Look for an entry in a parent pointing to this inode. */ /* Look for an entry in a parent pointing to this inode. */
struct xfs_scrub_parent_ctx { struct xchk_parent_ctx {
struct dir_context dc; struct dir_context dc;
xfs_ino_t ino; xfs_ino_t ino;
xfs_nlink_t nlink; xfs_nlink_t nlink;
@ -46,7 +46,7 @@ struct xfs_scrub_parent_ctx {
/* Look for a single entry in a directory pointing to an inode. */ /* Look for a single entry in a directory pointing to an inode. */
STATIC int STATIC int
xfs_scrub_parent_actor( xchk_parent_actor(
struct dir_context *dc, struct dir_context *dc,
const char *name, const char *name,
int namelen, int namelen,
@ -54,9 +54,9 @@ xfs_scrub_parent_actor(
u64 ino, u64 ino,
unsigned type) unsigned type)
{ {
struct xfs_scrub_parent_ctx *spc; struct xchk_parent_ctx *spc;
spc = container_of(dc, struct xfs_scrub_parent_ctx, dc); spc = container_of(dc, struct xchk_parent_ctx, dc);
if (spc->ino == ino) if (spc->ino == ino)
spc->nlink++; spc->nlink++;
return 0; return 0;
@ -64,13 +64,13 @@ xfs_scrub_parent_actor(
/* Count the number of dentries in the parent dir that point to this inode. */ /* Count the number of dentries in the parent dir that point to this inode. */
STATIC int STATIC int
xfs_scrub_parent_count_parent_dentries( xchk_parent_count_parent_dentries(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_inode *parent, struct xfs_inode *parent,
xfs_nlink_t *nlink) xfs_nlink_t *nlink)
{ {
struct xfs_scrub_parent_ctx spc = { struct xchk_parent_ctx spc = {
.dc.actor = xfs_scrub_parent_actor, .dc.actor = xchk_parent_actor,
.dc.pos = 0, .dc.pos = 0,
.ino = sc->ip->i_ino, .ino = sc->ip->i_ino,
.nlink = 0, .nlink = 0,
@ -120,7 +120,7 @@ xfs_scrub_parent_count_parent_dentries(
* entry pointing back to the inode being scrubbed. * entry pointing back to the inode being scrubbed.
*/ */
STATIC int STATIC int
xfs_scrub_parent_validate( xchk_parent_validate(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_ino_t dnum, xfs_ino_t dnum,
bool *try_again) bool *try_again)
@ -138,7 +138,7 @@ xfs_scrub_parent_validate(
/* '..' must not point to ourselves. */ /* '..' must not point to ourselves. */
if (sc->ip->i_ino == dnum) { if (sc->ip->i_ino == dnum) {
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out; goto out;
} }
@ -165,13 +165,13 @@ xfs_scrub_parent_validate(
error = xfs_iget(mp, sc->tp, dnum, XFS_IGET_UNTRUSTED, 0, &dp); error = xfs_iget(mp, sc->tp, dnum, XFS_IGET_UNTRUSTED, 0, &dp);
if (error == -EINVAL) { if (error == -EINVAL) {
error = -EFSCORRUPTED; error = -EFSCORRUPTED;
xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, &error); xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error);
goto out; goto out;
} }
if (!xfs_scrub_fblock_xref_process_error(sc, XFS_DATA_FORK, 0, &error)) if (!xchk_fblock_xref_process_error(sc, XFS_DATA_FORK, 0, &error))
goto out; goto out;
if (dp == sc->ip || !S_ISDIR(VFS_I(dp)->i_mode)) { if (dp == sc->ip || !S_ISDIR(VFS_I(dp)->i_mode)) {
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out_rele; goto out_rele;
} }
@ -183,12 +183,12 @@ xfs_scrub_parent_validate(
* the child inodes. * the child inodes.
*/ */
if (xfs_ilock_nowait(dp, XFS_IOLOCK_SHARED)) { if (xfs_ilock_nowait(dp, XFS_IOLOCK_SHARED)) {
error = xfs_scrub_parent_count_parent_dentries(sc, dp, &nlink); error = xchk_parent_count_parent_dentries(sc, dp, &nlink);
if (!xfs_scrub_fblock_xref_process_error(sc, XFS_DATA_FORK, 0, if (!xchk_fblock_xref_process_error(sc, XFS_DATA_FORK, 0,
&error)) &error))
goto out_unlock; goto out_unlock;
if (nlink != expected_nlink) if (nlink != expected_nlink)
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out_unlock; goto out_unlock;
} }
@ -200,18 +200,18 @@ xfs_scrub_parent_validate(
*/ */
xfs_iunlock(sc->ip, sc->ilock_flags); xfs_iunlock(sc->ip, sc->ilock_flags);
sc->ilock_flags = 0; sc->ilock_flags = 0;
error = xfs_scrub_ilock_inverted(dp, XFS_IOLOCK_SHARED); error = xchk_ilock_inverted(dp, XFS_IOLOCK_SHARED);
if (error) if (error)
goto out_rele; goto out_rele;
/* Go looking for our dentry. */ /* Go looking for our dentry. */
error = xfs_scrub_parent_count_parent_dentries(sc, dp, &nlink); error = xchk_parent_count_parent_dentries(sc, dp, &nlink);
if (!xfs_scrub_fblock_xref_process_error(sc, XFS_DATA_FORK, 0, &error)) if (!xchk_fblock_xref_process_error(sc, XFS_DATA_FORK, 0, &error))
goto out_unlock; goto out_unlock;
/* Drop the parent lock, relock this inode. */ /* Drop the parent lock, relock this inode. */
xfs_iunlock(dp, XFS_IOLOCK_SHARED); xfs_iunlock(dp, XFS_IOLOCK_SHARED);
error = xfs_scrub_ilock_inverted(sc->ip, XFS_IOLOCK_EXCL); error = xchk_ilock_inverted(sc->ip, XFS_IOLOCK_EXCL);
if (error) if (error)
goto out_rele; goto out_rele;
sc->ilock_flags = XFS_IOLOCK_EXCL; sc->ilock_flags = XFS_IOLOCK_EXCL;
@ -225,7 +225,7 @@ xfs_scrub_parent_validate(
/* Look up '..' to see if the inode changed. */ /* Look up '..' to see if the inode changed. */
error = xfs_dir_lookup(sc->tp, sc->ip, &xfs_name_dotdot, &dnum, NULL); error = xfs_dir_lookup(sc->tp, sc->ip, &xfs_name_dotdot, &dnum, NULL);
if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, &error)) if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
goto out_rele; goto out_rele;
/* Drat, parent changed. Try again! */ /* Drat, parent changed. Try again! */
@ -241,7 +241,7 @@ xfs_scrub_parent_validate(
* for us in the parent. * for us in the parent.
*/ */
if (nlink != expected_nlink) if (nlink != expected_nlink)
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
return error; return error;
out_unlock: out_unlock:
@ -254,7 +254,7 @@ xfs_scrub_parent_validate(
/* Scrub a parent pointer. */ /* Scrub a parent pointer. */
int int
xfs_scrub_parent( xchk_parent(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
struct xfs_mount *mp = sc->mp; struct xfs_mount *mp = sc->mp;
@ -272,7 +272,7 @@ xfs_scrub_parent(
/* We're not a special inode, are we? */ /* We're not a special inode, are we? */
if (!xfs_verify_dir_ino(mp, sc->ip->i_ino)) { if (!xfs_verify_dir_ino(mp, sc->ip->i_ino)) {
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out; goto out;
} }
@ -288,10 +288,10 @@ xfs_scrub_parent(
/* Look up '..' */ /* Look up '..' */
error = xfs_dir_lookup(sc->tp, sc->ip, &xfs_name_dotdot, &dnum, NULL); error = xfs_dir_lookup(sc->tp, sc->ip, &xfs_name_dotdot, &dnum, NULL);
if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, &error)) if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
goto out; goto out;
if (!xfs_verify_dir_ino(mp, dnum)) { if (!xfs_verify_dir_ino(mp, dnum)) {
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out; goto out;
} }
@ -299,12 +299,12 @@ xfs_scrub_parent(
if (sc->ip == mp->m_rootip) { if (sc->ip == mp->m_rootip) {
if (sc->ip->i_ino != mp->m_sb.sb_rootino || if (sc->ip->i_ino != mp->m_sb.sb_rootino ||
sc->ip->i_ino != dnum) sc->ip->i_ino != dnum)
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out; goto out;
} }
do { do {
error = xfs_scrub_parent_validate(sc, dnum, &try_again); error = xchk_parent_validate(sc, dnum, &try_again);
if (error) if (error)
goto out; goto out;
} while (try_again && ++tries < 20); } while (try_again && ++tries < 20);
@ -314,7 +314,7 @@ xfs_scrub_parent(
* incomplete. Userspace can decide if it wants to try again. * incomplete. Userspace can decide if it wants to try again.
*/ */
if (try_again && tries == 20) if (try_again && tries == 20)
xfs_scrub_set_incomplete(sc); xchk_set_incomplete(sc);
out: out:
/* /*
* If we failed to lock the parent inode even after a retry, just mark * If we failed to lock the parent inode even after a retry, just mark
@ -322,7 +322,7 @@ xfs_scrub_parent(
*/ */
if (sc->try_harder && error == -EDEADLOCK) { if (sc->try_harder && error == -EDEADLOCK) {
error = 0; error = 0;
xfs_scrub_set_incomplete(sc); xchk_set_incomplete(sc);
} }
return error; return error;
} }
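A quick illustration of the retry policy above: the '..' lookup can race with a rename, so the validation is retried a bounded number of times and the scan is marked incomplete instead of spinning forever. The sketch below is a minimal userspace rendering of that pattern; validate_once(), mark_incomplete() and retry_limit are illustrative stand-ins, not kernel symbols.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for xchk_parent_validate(): pretend the parent keeps moving. */
static int validate_once(bool *try_again)
{
	*try_again = true;
	return 0;
}

/* Stand-in for xchk_set_incomplete(). */
static void mark_incomplete(void)
{
	puts("scan incomplete; userspace may retry");
}

int main(void)
{
	const int retry_limit = 20;	/* same bound as xchk_parent() */
	bool try_again = false;
	int tries = 0;
	int error;

	do {
		error = validate_once(&try_again);
		if (error)
			return 1;
	} while (try_again && ++tries < retry_limit);

	/* Out of retries: report incomplete rather than looping forever. */
	if (try_again && tries == retry_limit)
		mark_incomplete();
	return 0;
}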


@ -30,7 +30,7 @@
/* Convert a scrub type code to a DQ flag, or return 0 if error. */ /* Convert a scrub type code to a DQ flag, or return 0 if error. */
static inline uint static inline uint
xfs_scrub_quota_to_dqtype( xchk_quota_to_dqtype(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
switch (sc->sm->sm_type) { switch (sc->sm->sm_type) {
@ -47,7 +47,7 @@ xfs_scrub_quota_to_dqtype(
/* Set us up to scrub a quota. */ /* Set us up to scrub a quota. */
int int
xfs_scrub_setup_quota( xchk_setup_quota(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_inode *ip) struct xfs_inode *ip)
{ {
@ -57,14 +57,14 @@ xfs_scrub_setup_quota(
if (!XFS_IS_QUOTA_RUNNING(sc->mp) || !XFS_IS_QUOTA_ON(sc->mp)) if (!XFS_IS_QUOTA_RUNNING(sc->mp) || !XFS_IS_QUOTA_ON(sc->mp))
return -ENOENT; return -ENOENT;
dqtype = xfs_scrub_quota_to_dqtype(sc); dqtype = xchk_quota_to_dqtype(sc);
if (dqtype == 0) if (dqtype == 0)
return -EINVAL; return -EINVAL;
sc->has_quotaofflock = true; sc->has_quotaofflock = true;
mutex_lock(&sc->mp->m_quotainfo->qi_quotaofflock); mutex_lock(&sc->mp->m_quotainfo->qi_quotaofflock);
if (!xfs_this_quota_on(sc->mp, dqtype)) if (!xfs_this_quota_on(sc->mp, dqtype))
return -ENOENT; return -ENOENT;
error = xfs_scrub_setup_fs(sc, ip); error = xchk_setup_fs(sc, ip);
if (error) if (error)
return error; return error;
sc->ip = xfs_quota_inode(sc->mp, dqtype); sc->ip = xfs_quota_inode(sc->mp, dqtype);
@ -75,19 +75,19 @@ xfs_scrub_setup_quota(
/* Quotas. */ /* Quotas. */
struct xfs_scrub_quota_info { struct xchk_quota_info {
struct xfs_scrub_context *sc; struct xfs_scrub_context *sc;
xfs_dqid_t last_id; xfs_dqid_t last_id;
}; };
/* Scrub the fields in an individual quota item. */ /* Scrub the fields in an individual quota item. */
STATIC int STATIC int
xfs_scrub_quota_item( xchk_quota_item(
struct xfs_dquot *dq, struct xfs_dquot *dq,
uint dqtype, uint dqtype,
void *priv) void *priv)
{ {
struct xfs_scrub_quota_info *sqi = priv; struct xchk_quota_info *sqi = priv;
struct xfs_scrub_context *sc = sqi->sc; struct xfs_scrub_context *sc = sqi->sc;
struct xfs_mount *mp = sc->mp; struct xfs_mount *mp = sc->mp;
struct xfs_disk_dquot *d = &dq->q_core; struct xfs_disk_dquot *d = &dq->q_core;
@ -111,16 +111,16 @@ xfs_scrub_quota_item(
*/ */
offset = id / qi->qi_dqperchunk; offset = id / qi->qi_dqperchunk;
if (id && id <= sqi->last_id) if (id && id <= sqi->last_id)
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
sqi->last_id = id; sqi->last_id = id;
/* Did we get the dquot type we wanted? */ /* Did we get the dquot type we wanted? */
if (dqtype != (d->d_flags & XFS_DQ_ALLTYPES)) if (dqtype != (d->d_flags & XFS_DQ_ALLTYPES))
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
if (d->d_pad0 != cpu_to_be32(0) || d->d_pad != cpu_to_be16(0)) if (d->d_pad0 != cpu_to_be32(0) || d->d_pad != cpu_to_be16(0))
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
/* Check the limits. */ /* Check the limits. */
bhard = be64_to_cpu(d->d_blk_hardlimit); bhard = be64_to_cpu(d->d_blk_hardlimit);
@ -140,19 +140,19 @@ xfs_scrub_quota_item(
* the hard limit. * the hard limit.
*/ */
if (bhard > mp->m_sb.sb_dblocks) if (bhard > mp->m_sb.sb_dblocks)
xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset); xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
if (bsoft > bhard) if (bsoft > bhard)
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
if (ihard > mp->m_maxicount) if (ihard > mp->m_maxicount)
xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset); xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
if (isoft > ihard) if (isoft > ihard)
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
if (rhard > mp->m_sb.sb_rblocks) if (rhard > mp->m_sb.sb_rblocks)
xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset); xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
if (rsoft > rhard) if (rsoft > rhard)
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
/* Check the resource counts. */ /* Check the resource counts. */
bcount = be64_to_cpu(d->d_bcount); bcount = be64_to_cpu(d->d_bcount);
@ -167,15 +167,15 @@ xfs_scrub_quota_item(
*/ */
if (xfs_sb_version_hasreflink(&mp->m_sb)) { if (xfs_sb_version_hasreflink(&mp->m_sb)) {
if (mp->m_sb.sb_dblocks < bcount) if (mp->m_sb.sb_dblocks < bcount)
xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, xchk_fblock_set_warning(sc, XFS_DATA_FORK,
offset); offset);
} else { } else {
if (mp->m_sb.sb_dblocks < bcount) if (mp->m_sb.sb_dblocks < bcount)
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
offset); offset);
} }
if (icount > fs_icount || rcount > mp->m_sb.sb_rblocks) if (icount > fs_icount || rcount > mp->m_sb.sb_rblocks)
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
/* /*
* We can violate the hard limits if the admin suddenly sets a * We can violate the hard limits if the admin suddenly sets a
@ -183,18 +183,18 @@ xfs_scrub_quota_item(
* admin review. * admin review.
*/ */
if (id != 0 && bhard != 0 && bcount > bhard) if (id != 0 && bhard != 0 && bcount > bhard)
xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset); xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
if (id != 0 && ihard != 0 && icount > ihard) if (id != 0 && ihard != 0 && icount > ihard)
xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset); xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
if (id != 0 && rhard != 0 && rcount > rhard) if (id != 0 && rhard != 0 && rcount > rhard)
xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset); xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
return 0; return 0;
} }
/* Check the quota's data fork. */ /* Check the quota's data fork. */
STATIC int STATIC int
xfs_scrub_quota_data_fork( xchk_quota_data_fork(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
struct xfs_bmbt_irec irec = { 0 }; struct xfs_bmbt_irec irec = { 0 };
@ -205,7 +205,7 @@ xfs_scrub_quota_data_fork(
int error = 0; int error = 0;
/* Invoke the fork scrubber. */ /* Invoke the fork scrubber. */
error = xfs_scrub_metadata_inode_forks(sc); error = xchk_metadata_inode_forks(sc);
if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
return error; return error;
@ -213,7 +213,7 @@ xfs_scrub_quota_data_fork(
max_dqid_off = ((xfs_dqid_t)-1) / qi->qi_dqperchunk; max_dqid_off = ((xfs_dqid_t)-1) / qi->qi_dqperchunk;
ifp = XFS_IFORK_PTR(sc->ip, XFS_DATA_FORK); ifp = XFS_IFORK_PTR(sc->ip, XFS_DATA_FORK);
for_each_xfs_iext(ifp, &icur, &irec) { for_each_xfs_iext(ifp, &icur, &irec) {
if (xfs_scrub_should_terminate(sc, &error)) if (xchk_should_terminate(sc, &error))
break; break;
/* /*
* delalloc extents or blocks mapped above the highest * delalloc extents or blocks mapped above the highest
@ -222,7 +222,7 @@ xfs_scrub_quota_data_fork(
if (isnullstartblock(irec.br_startblock) || if (isnullstartblock(irec.br_startblock) ||
irec.br_startoff > max_dqid_off || irec.br_startoff > max_dqid_off ||
irec.br_startoff + irec.br_blockcount - 1 > max_dqid_off) { irec.br_startoff + irec.br_blockcount - 1 > max_dqid_off) {
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
irec.br_startoff); irec.br_startoff);
break; break;
} }
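The extent checks above rely on the quota file's layout: dquot ID n lives at file offset n / qi_dqperchunk, so nothing should be mapped past the offset of the largest possible xfs_dqid_t, and delalloc extents are never legal there. A small self-contained sketch of that offset bound follows; the dquots-per-block value is invented purely for illustration.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t xfs_dqid_t;

int main(void)
{
	/* Assume, for illustration only, 42 dquot records per block. */
	uint64_t dqperchunk = 42;

	/* File offset (in blocks) of the highest possible dquot ID. */
	uint64_t max_dqid_off = (uint64_t)(xfs_dqid_t)-1 / dqperchunk;

	/* A hypothetical mapped extent in the quota inode's data fork. */
	uint64_t startoff = max_dqid_off + 1;
	uint64_t blockcount = 8;

	if (startoff > max_dqid_off ||
	    startoff + blockcount - 1 > max_dqid_off)
		printf("extent at %llu is past the last dquot offset %llu: corrupt\n",
		       (unsigned long long)startoff,
		       (unsigned long long)max_dqid_off);
	return 0;
}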
@ -233,19 +233,19 @@ xfs_scrub_quota_data_fork(
/* Scrub all of a quota type's items. */ /* Scrub all of a quota type's items. */
int int
xfs_scrub_quota( xchk_quota(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
struct xfs_scrub_quota_info sqi; struct xchk_quota_info sqi;
struct xfs_mount *mp = sc->mp; struct xfs_mount *mp = sc->mp;
struct xfs_quotainfo *qi = mp->m_quotainfo; struct xfs_quotainfo *qi = mp->m_quotainfo;
uint dqtype; uint dqtype;
int error = 0; int error = 0;
dqtype = xfs_scrub_quota_to_dqtype(sc); dqtype = xchk_quota_to_dqtype(sc);
/* Look for problem extents. */ /* Look for problem extents. */
error = xfs_scrub_quota_data_fork(sc); error = xchk_quota_data_fork(sc);
if (error) if (error)
goto out; goto out;
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
@ -260,10 +260,10 @@ xfs_scrub_quota(
sc->ilock_flags = 0; sc->ilock_flags = 0;
sqi.sc = sc; sqi.sc = sc;
sqi.last_id = 0; sqi.last_id = 0;
error = xfs_qm_dqiterate(mp, dqtype, xfs_scrub_quota_item, &sqi); error = xfs_qm_dqiterate(mp, dqtype, xchk_quota_item, &sqi);
sc->ilock_flags = XFS_ILOCK_EXCL; sc->ilock_flags = XFS_ILOCK_EXCL;
xfs_ilock(sc->ip, sc->ilock_flags); xfs_ilock(sc->ip, sc->ilock_flags);
if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, if (!xchk_fblock_process_error(sc, XFS_DATA_FORK,
sqi.last_id * qi->qi_dqperchunk, &error)) sqi.last_id * qi->qi_dqperchunk, &error))
goto out; goto out;


@ -28,11 +28,11 @@
* Set us up to scrub reference count btrees. * Set us up to scrub reference count btrees.
*/ */
int int
xfs_scrub_setup_ag_refcountbt( xchk_setup_ag_refcountbt(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_inode *ip) struct xfs_inode *ip)
{ {
return xfs_scrub_setup_ag_btree(sc, ip, false); return xchk_setup_ag_btree(sc, ip, false);
} }
/* Reference count btree scrubber. */ /* Reference count btree scrubber. */
@ -73,12 +73,12 @@ xfs_scrub_setup_ag_refcountbt(
* If the refcount is correct, all the check conditions in the algorithm * If the refcount is correct, all the check conditions in the algorithm
* should always hold true. If not, the refcount is incorrect. * should always hold true. If not, the refcount is incorrect.
*/ */
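The invariant behind the fragment machinery declared below: if a refcount record claims N owners for an extent, then N reverse mappings should overlap every block of that extent. The kernel proves this with the fragment list and xchk_refcountbt_process_rmap_fragments(); the sketch here only illustrates the idea by counting overlaps at a single block, with made-up interval values.

#include <stdio.h>

struct rmap { unsigned long start, count; };

/* Count how many reverse mappings cover block bno. */
static unsigned int overlaps_at(const struct rmap *rm, int nr,
				unsigned long bno)
{
	unsigned int seen = 0;

	for (int i = 0; i < nr; i++)
		if (bno >= rm[i].start && bno < rm[i].start + rm[i].count)
			seen++;
	return seen;
}

int main(void)
{
	/* Three owners map block 107, so the refcountbt should say 3. */
	struct rmap rmaps[] = {
		{ .start = 100, .count = 10 },
		{ .start = 100, .count = 10 },
		{ .start = 105, .count =  5 },
	};
	unsigned long refcount = 3;	/* what the refcount record claims */
	unsigned long bno = 107;

	if (overlaps_at(rmaps, 3, bno) != refcount)
		puts("refcount record disagrees with the rmapbt: corrupt");
	else
		puts("refcount record agrees with the rmapbt at this block");
	return 0;
}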
struct xfs_scrub_refcnt_frag { struct xchk_refcnt_frag {
struct list_head list; struct list_head list;
struct xfs_rmap_irec rm; struct xfs_rmap_irec rm;
}; };
struct xfs_scrub_refcnt_check { struct xchk_refcnt_check {
struct xfs_scrub_context *sc; struct xfs_scrub_context *sc;
struct list_head fragments; struct list_head fragments;
@ -99,18 +99,18 @@ struct xfs_scrub_refcnt_check {
* fragments as the refcountbt says we should have. * fragments as the refcountbt says we should have.
*/ */
STATIC int STATIC int
xfs_scrub_refcountbt_rmap_check( xchk_refcountbt_rmap_check(
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
struct xfs_rmap_irec *rec, struct xfs_rmap_irec *rec,
void *priv) void *priv)
{ {
struct xfs_scrub_refcnt_check *refchk = priv; struct xchk_refcnt_check *refchk = priv;
struct xfs_scrub_refcnt_frag *frag; struct xchk_refcnt_frag *frag;
xfs_agblock_t rm_last; xfs_agblock_t rm_last;
xfs_agblock_t rc_last; xfs_agblock_t rc_last;
int error = 0; int error = 0;
if (xfs_scrub_should_terminate(refchk->sc, &error)) if (xchk_should_terminate(refchk->sc, &error))
return error; return error;
rm_last = rec->rm_startblock + rec->rm_blockcount - 1; rm_last = rec->rm_startblock + rec->rm_blockcount - 1;
@ -118,7 +118,7 @@ xfs_scrub_refcountbt_rmap_check(
/* Confirm that a single-owner refc extent is a CoW stage. */ /* Confirm that a single-owner refc extent is a CoW stage. */
if (refchk->refcount == 1 && rec->rm_owner != XFS_RMAP_OWN_COW) { if (refchk->refcount == 1 && rec->rm_owner != XFS_RMAP_OWN_COW) {
xfs_scrub_btree_xref_set_corrupt(refchk->sc, cur, 0); xchk_btree_xref_set_corrupt(refchk->sc, cur, 0);
return 0; return 0;
} }
@ -135,7 +135,7 @@ xfs_scrub_refcountbt_rmap_check(
* is healthy each rmap_irec we see will be in agbno order * is healthy each rmap_irec we see will be in agbno order
* so we don't need insertion sort here. * so we don't need insertion sort here.
*/ */
frag = kmem_alloc(sizeof(struct xfs_scrub_refcnt_frag), frag = kmem_alloc(sizeof(struct xchk_refcnt_frag),
KM_MAYFAIL); KM_MAYFAIL);
if (!frag) if (!frag)
return -ENOMEM; return -ENOMEM;
@ -154,12 +154,12 @@ xfs_scrub_refcountbt_rmap_check(
* we have a refcountbt error. * we have a refcountbt error.
*/ */
STATIC void STATIC void
xfs_scrub_refcountbt_process_rmap_fragments( xchk_refcountbt_process_rmap_fragments(
struct xfs_scrub_refcnt_check *refchk) struct xchk_refcnt_check *refchk)
{ {
struct list_head worklist; struct list_head worklist;
struct xfs_scrub_refcnt_frag *frag; struct xchk_refcnt_frag *frag;
struct xfs_scrub_refcnt_frag *n; struct xchk_refcnt_frag *n;
xfs_agblock_t bno; xfs_agblock_t bno;
xfs_agblock_t rbno; xfs_agblock_t rbno;
xfs_agblock_t next_rbno; xfs_agblock_t next_rbno;
@ -277,13 +277,13 @@ xfs_scrub_refcountbt_process_rmap_fragments(
/* Use the rmap entries covering this extent to verify the refcount. */ /* Use the rmap entries covering this extent to verify the refcount. */
STATIC void STATIC void
xfs_scrub_refcountbt_xref_rmap( xchk_refcountbt_xref_rmap(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_agblock_t bno, xfs_agblock_t bno,
xfs_extlen_t len, xfs_extlen_t len,
xfs_nlink_t refcount) xfs_nlink_t refcount)
{ {
struct xfs_scrub_refcnt_check refchk = { struct xchk_refcnt_check refchk = {
.sc = sc, .sc = sc,
.bno = bno, .bno = bno,
.len = len, .len = len,
@ -292,11 +292,11 @@ xfs_scrub_refcountbt_xref_rmap(
}; };
struct xfs_rmap_irec low; struct xfs_rmap_irec low;
struct xfs_rmap_irec high; struct xfs_rmap_irec high;
struct xfs_scrub_refcnt_frag *frag; struct xchk_refcnt_frag *frag;
struct xfs_scrub_refcnt_frag *n; struct xchk_refcnt_frag *n;
int error; int error;
if (!sc->sa.rmap_cur || xfs_scrub_skip_xref(sc->sm)) if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
return; return;
/* Cross-reference with the rmapbt to confirm the refcount. */ /* Cross-reference with the rmapbt to confirm the refcount. */
@ -307,13 +307,13 @@ xfs_scrub_refcountbt_xref_rmap(
INIT_LIST_HEAD(&refchk.fragments); INIT_LIST_HEAD(&refchk.fragments);
error = xfs_rmap_query_range(sc->sa.rmap_cur, &low, &high, error = xfs_rmap_query_range(sc->sa.rmap_cur, &low, &high,
&xfs_scrub_refcountbt_rmap_check, &refchk); &xchk_refcountbt_rmap_check, &refchk);
if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur)) if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
goto out_free; goto out_free;
xfs_scrub_refcountbt_process_rmap_fragments(&refchk); xchk_refcountbt_process_rmap_fragments(&refchk);
if (refcount != refchk.seen) if (refcount != refchk.seen)
xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0); xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
out_free: out_free:
list_for_each_entry_safe(frag, n, &refchk.fragments, list) { list_for_each_entry_safe(frag, n, &refchk.fragments, list) {
@ -324,7 +324,7 @@ xfs_scrub_refcountbt_xref_rmap(
/* Cross-reference with the other btrees. */ /* Cross-reference with the other btrees. */
STATIC void STATIC void
xfs_scrub_refcountbt_xref( xchk_refcountbt_xref(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_agblock_t agbno, xfs_agblock_t agbno,
xfs_extlen_t len, xfs_extlen_t len,
@ -333,15 +333,15 @@ xfs_scrub_refcountbt_xref(
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return; return;
xfs_scrub_xref_is_used_space(sc, agbno, len); xchk_xref_is_used_space(sc, agbno, len);
xfs_scrub_xref_is_not_inode_chunk(sc, agbno, len); xchk_xref_is_not_inode_chunk(sc, agbno, len);
xfs_scrub_refcountbt_xref_rmap(sc, agbno, len, refcount); xchk_refcountbt_xref_rmap(sc, agbno, len, refcount);
} }
/* Scrub a refcountbt record. */ /* Scrub a refcountbt record. */
STATIC int STATIC int
xfs_scrub_refcountbt_rec( xchk_refcountbt_rec(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
union xfs_btree_rec *rec) union xfs_btree_rec *rec)
{ {
struct xfs_mount *mp = bs->cur->bc_mp; struct xfs_mount *mp = bs->cur->bc_mp;
@ -360,7 +360,7 @@ xfs_scrub_refcountbt_rec(
/* Only CoW records can have refcount == 1. */ /* Only CoW records can have refcount == 1. */
has_cowflag = (bno & XFS_REFC_COW_START); has_cowflag = (bno & XFS_REFC_COW_START);
if ((refcount == 1 && !has_cowflag) || (refcount != 1 && has_cowflag)) if ((refcount == 1 && !has_cowflag) || (refcount != 1 && has_cowflag))
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
if (has_cowflag) if (has_cowflag)
(*cow_blocks) += len; (*cow_blocks) += len;
@ -369,19 +369,19 @@ xfs_scrub_refcountbt_rec(
if (bno + len <= bno || if (bno + len <= bno ||
!xfs_verify_agbno(mp, agno, bno) || !xfs_verify_agbno(mp, agno, bno) ||
!xfs_verify_agbno(mp, agno, bno + len - 1)) !xfs_verify_agbno(mp, agno, bno + len - 1))
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
if (refcount == 0) if (refcount == 0)
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
xfs_scrub_refcountbt_xref(bs->sc, bno, len, refcount); xchk_refcountbt_xref(bs->sc, bno, len, refcount);
return error; return error;
} }
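The cowflag test above reduces to one rule: a record has refcount == 1 exactly when it carries the CoW staging bit. A tiny sketch of that predicate; cow_flag_consistent() is an illustrative name, not a kernel helper.

#include <stdbool.h>
#include <stdio.h>

/* Refcount 1 is legal only for CoW staging extents, and CoW staging
 * extents must have refcount 1 (mirrors the check in xchk_refcountbt_rec()). */
static bool cow_flag_consistent(bool has_cowflag, unsigned int refcount)
{
	return (refcount == 1) == has_cowflag;
}

int main(void)
{
	printf("%d %d %d\n",
	       cow_flag_consistent(true, 1),	/* ok: CoW staging */
	       cow_flag_consistent(false, 3),	/* ok: shared data */
	       cow_flag_consistent(false, 1));	/* corrupt */
	return 0;
}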
/* Make sure we have as many refc blocks as the rmap says. */ /* Make sure we have as many refc blocks as the rmap says. */
STATIC void STATIC void
xfs_scrub_refcount_xref_rmap( xchk_refcount_xref_rmap(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_owner_info *oinfo, struct xfs_owner_info *oinfo,
xfs_filblks_t cow_blocks) xfs_filblks_t cow_blocks)
@ -390,33 +390,33 @@ xfs_scrub_refcount_xref_rmap(
xfs_filblks_t blocks; xfs_filblks_t blocks;
int error; int error;
if (!sc->sa.rmap_cur || xfs_scrub_skip_xref(sc->sm)) if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
return; return;
/* Check that we saw as many refcbt blocks as the rmap knows about. */ /* Check that we saw as many refcbt blocks as the rmap knows about. */
error = xfs_btree_count_blocks(sc->sa.refc_cur, &refcbt_blocks); error = xfs_btree_count_blocks(sc->sa.refc_cur, &refcbt_blocks);
if (!xfs_scrub_btree_process_error(sc, sc->sa.refc_cur, 0, &error)) if (!xchk_btree_process_error(sc, sc->sa.refc_cur, 0, &error))
return; return;
error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, oinfo, error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, oinfo,
&blocks); &blocks);
if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur)) if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
return; return;
if (blocks != refcbt_blocks) if (blocks != refcbt_blocks)
xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0); xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
/* Check that we saw as many cow blocks as the rmap knows about. */ /* Check that we saw as many cow blocks as the rmap knows about. */
xfs_rmap_ag_owner(oinfo, XFS_RMAP_OWN_COW); xfs_rmap_ag_owner(oinfo, XFS_RMAP_OWN_COW);
error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, oinfo, error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, oinfo,
&blocks); &blocks);
if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur)) if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
return; return;
if (blocks != cow_blocks) if (blocks != cow_blocks)
xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0); xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
} }
/* Scrub the refcount btree for some AG. */ /* Scrub the refcount btree for some AG. */
int int
xfs_scrub_refcountbt( xchk_refcountbt(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
struct xfs_owner_info oinfo; struct xfs_owner_info oinfo;
@ -424,19 +424,19 @@ xfs_scrub_refcountbt(
int error; int error;
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_REFC); xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_REFC);
error = xfs_scrub_btree(sc, sc->sa.refc_cur, xfs_scrub_refcountbt_rec, error = xchk_btree(sc, sc->sa.refc_cur, xchk_refcountbt_rec,
&oinfo, &cow_blocks); &oinfo, &cow_blocks);
if (error) if (error)
return error; return error;
xfs_scrub_refcount_xref_rmap(sc, &oinfo, cow_blocks); xchk_refcount_xref_rmap(sc, &oinfo, cow_blocks);
return 0; return 0;
} }
/* xref check that a cow staging extent is marked in the refcountbt. */ /* xref check that a cow staging extent is marked in the refcountbt. */
void void
xfs_scrub_xref_is_cow_staging( xchk_xref_is_cow_staging(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_agblock_t agbno, xfs_agblock_t agbno,
xfs_extlen_t len) xfs_extlen_t len)
@ -446,35 +446,35 @@ xfs_scrub_xref_is_cow_staging(
int has_refcount; int has_refcount;
int error; int error;
if (!sc->sa.refc_cur || xfs_scrub_skip_xref(sc->sm)) if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
return; return;
/* Find the CoW staging extent. */ /* Find the CoW staging extent. */
error = xfs_refcount_lookup_le(sc->sa.refc_cur, error = xfs_refcount_lookup_le(sc->sa.refc_cur,
agbno + XFS_REFC_COW_START, &has_refcount); agbno + XFS_REFC_COW_START, &has_refcount);
if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur)) if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
return; return;
if (!has_refcount) { if (!has_refcount) {
xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0); xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
return; return;
} }
error = xfs_refcount_get_rec(sc->sa.refc_cur, &rc, &has_refcount); error = xfs_refcount_get_rec(sc->sa.refc_cur, &rc, &has_refcount);
if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur)) if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
return; return;
if (!has_refcount) { if (!has_refcount) {
xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0); xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
return; return;
} }
/* CoW flag must be set, refcount must be 1. */ /* CoW flag must be set, refcount must be 1. */
has_cowflag = (rc.rc_startblock & XFS_REFC_COW_START); has_cowflag = (rc.rc_startblock & XFS_REFC_COW_START);
if (!has_cowflag || rc.rc_refcount != 1) if (!has_cowflag || rc.rc_refcount != 1)
xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0); xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
/* Must be at least as long as what was passed in */ /* Must be at least as long as what was passed in */
if (rc.rc_blockcount < len) if (rc.rc_blockcount < len)
xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0); xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
} }
/* /*
@ -482,7 +482,7 @@ xfs_scrub_xref_is_cow_staging(
* can have multiple owners. * can have multiple owners.
*/ */
void void
xfs_scrub_xref_is_not_shared( xchk_xref_is_not_shared(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_agblock_t agbno, xfs_agblock_t agbno,
xfs_extlen_t len) xfs_extlen_t len)
@ -490,12 +490,12 @@ xfs_scrub_xref_is_not_shared(
bool shared; bool shared;
int error; int error;
if (!sc->sa.refc_cur || xfs_scrub_skip_xref(sc->sm)) if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
return; return;
error = xfs_refcount_has_record(sc->sa.refc_cur, agbno, len, &shared); error = xfs_refcount_has_record(sc->sa.refc_cur, agbno, len, &shared);
if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur)) if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
return; return;
if (shared) if (shared)
xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0); xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
} }


@ -50,7 +50,7 @@ xfs_repair_attempt(
trace_xfs_repair_attempt(ip, sc->sm, error); trace_xfs_repair_attempt(ip, sc->sm, error);
xfs_scrub_ag_btcur_free(&sc->sa); xchk_ag_btcur_free(&sc->sa);
/* Repair whatever's broken. */ /* Repair whatever's broken. */
ASSERT(sc->ops->repair); ASSERT(sc->ops->repair);
@ -110,7 +110,7 @@ xfs_repair_probe(
{ {
int error = 0; int error = 0;
if (xfs_scrub_should_terminate(sc, &error)) if (xchk_should_terminate(sc, &error))
return error; return error;
return 0; return 0;


@ -29,18 +29,18 @@
* Set us up to scrub reverse mapping btrees. * Set us up to scrub reverse mapping btrees.
*/ */
int int
xfs_scrub_setup_ag_rmapbt( xchk_setup_ag_rmapbt(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_inode *ip) struct xfs_inode *ip)
{ {
return xfs_scrub_setup_ag_btree(sc, ip, false); return xchk_setup_ag_btree(sc, ip, false);
} }
/* Reverse-mapping scrubber. */ /* Reverse-mapping scrubber. */
/* Cross-reference a rmap against the refcount btree. */ /* Cross-reference a rmap against the refcount btree. */
STATIC void STATIC void
xfs_scrub_rmapbt_xref_refc( xchk_rmapbt_xref_refc(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_rmap_irec *irec) struct xfs_rmap_irec *irec)
{ {
@ -52,7 +52,7 @@ xfs_scrub_rmapbt_xref_refc(
bool is_unwritten; bool is_unwritten;
int error; int error;
if (!sc->sa.refc_cur || xfs_scrub_skip_xref(sc->sm)) if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
return; return;
non_inode = XFS_RMAP_NON_INODE_OWNER(irec->rm_owner); non_inode = XFS_RMAP_NON_INODE_OWNER(irec->rm_owner);
@ -63,15 +63,15 @@ xfs_scrub_rmapbt_xref_refc(
/* If this is shared, must be a data fork extent. */ /* If this is shared, must be a data fork extent. */
error = xfs_refcount_find_shared(sc->sa.refc_cur, irec->rm_startblock, error = xfs_refcount_find_shared(sc->sa.refc_cur, irec->rm_startblock,
irec->rm_blockcount, &fbno, &flen, false); irec->rm_blockcount, &fbno, &flen, false);
if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur)) if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
return; return;
if (flen != 0 && (non_inode || is_attr || is_bmbt || is_unwritten)) if (flen != 0 && (non_inode || is_attr || is_bmbt || is_unwritten))
xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0); xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
} }
/* Cross-reference with the other btrees. */ /* Cross-reference with the other btrees. */
STATIC void STATIC void
xfs_scrub_rmapbt_xref( xchk_rmapbt_xref(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_rmap_irec *irec) struct xfs_rmap_irec *irec)
{ {
@ -81,22 +81,22 @@ xfs_scrub_rmapbt_xref(
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return; return;
xfs_scrub_xref_is_used_space(sc, agbno, len); xchk_xref_is_used_space(sc, agbno, len);
if (irec->rm_owner == XFS_RMAP_OWN_INODES) if (irec->rm_owner == XFS_RMAP_OWN_INODES)
xfs_scrub_xref_is_inode_chunk(sc, agbno, len); xchk_xref_is_inode_chunk(sc, agbno, len);
else else
xfs_scrub_xref_is_not_inode_chunk(sc, agbno, len); xchk_xref_is_not_inode_chunk(sc, agbno, len);
if (irec->rm_owner == XFS_RMAP_OWN_COW) if (irec->rm_owner == XFS_RMAP_OWN_COW)
xfs_scrub_xref_is_cow_staging(sc, irec->rm_startblock, xchk_xref_is_cow_staging(sc, irec->rm_startblock,
irec->rm_blockcount); irec->rm_blockcount);
else else
xfs_scrub_rmapbt_xref_refc(sc, irec); xchk_rmapbt_xref_refc(sc, irec);
} }
/* Scrub an rmapbt record. */ /* Scrub an rmapbt record. */
STATIC int STATIC int
xfs_scrub_rmapbt_rec( xchk_rmapbt_rec(
struct xfs_scrub_btree *bs, struct xchk_btree *bs,
union xfs_btree_rec *rec) union xfs_btree_rec *rec)
{ {
struct xfs_mount *mp = bs->cur->bc_mp; struct xfs_mount *mp = bs->cur->bc_mp;
@ -109,12 +109,12 @@ xfs_scrub_rmapbt_rec(
int error; int error;
error = xfs_rmap_btrec_to_irec(rec, &irec); error = xfs_rmap_btrec_to_irec(rec, &irec);
if (!xfs_scrub_btree_process_error(bs->sc, bs->cur, 0, &error)) if (!xchk_btree_process_error(bs->sc, bs->cur, 0, &error))
goto out; goto out;
/* Check extent. */ /* Check extent. */
if (irec.rm_startblock + irec.rm_blockcount <= irec.rm_startblock) if (irec.rm_startblock + irec.rm_blockcount <= irec.rm_startblock)
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
if (irec.rm_owner == XFS_RMAP_OWN_FS) { if (irec.rm_owner == XFS_RMAP_OWN_FS) {
/* /*
@ -124,7 +124,7 @@ xfs_scrub_rmapbt_rec(
*/ */
if (irec.rm_startblock != 0 || if (irec.rm_startblock != 0 ||
irec.rm_blockcount != XFS_AGFL_BLOCK(mp) + 1) irec.rm_blockcount != XFS_AGFL_BLOCK(mp) + 1)
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
} else { } else {
/* /*
* Otherwise we must point somewhere past the static metadata * Otherwise we must point somewhere past the static metadata
@ -133,7 +133,7 @@ xfs_scrub_rmapbt_rec(
if (!xfs_verify_agbno(mp, agno, irec.rm_startblock) || if (!xfs_verify_agbno(mp, agno, irec.rm_startblock) ||
!xfs_verify_agbno(mp, agno, irec.rm_startblock + !xfs_verify_agbno(mp, agno, irec.rm_startblock +
irec.rm_blockcount - 1)) irec.rm_blockcount - 1))
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
} }
/* Check flags. */ /* Check flags. */
@ -143,47 +143,47 @@ xfs_scrub_rmapbt_rec(
is_unwritten = irec.rm_flags & XFS_RMAP_UNWRITTEN; is_unwritten = irec.rm_flags & XFS_RMAP_UNWRITTEN;
if (is_bmbt && irec.rm_offset != 0) if (is_bmbt && irec.rm_offset != 0)
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
if (non_inode && irec.rm_offset != 0) if (non_inode && irec.rm_offset != 0)
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
if (is_unwritten && (is_bmbt || non_inode || is_attr)) if (is_unwritten && (is_bmbt || non_inode || is_attr))
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
if (non_inode && (is_bmbt || is_unwritten || is_attr)) if (non_inode && (is_bmbt || is_unwritten || is_attr))
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
if (!non_inode) { if (!non_inode) {
if (!xfs_verify_ino(mp, irec.rm_owner)) if (!xfs_verify_ino(mp, irec.rm_owner))
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
} else { } else {
/* Non-inode owner within the magic values? */ /* Non-inode owner within the magic values? */
if (irec.rm_owner <= XFS_RMAP_OWN_MIN || if (irec.rm_owner <= XFS_RMAP_OWN_MIN ||
irec.rm_owner > XFS_RMAP_OWN_FS) irec.rm_owner > XFS_RMAP_OWN_FS)
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
} }
xfs_scrub_rmapbt_xref(bs->sc, &irec); xchk_rmapbt_xref(bs->sc, &irec);
out: out:
return error; return error;
} }
/* Scrub the rmap btree for some AG. */ /* Scrub the rmap btree for some AG. */
int int
xfs_scrub_rmapbt( xchk_rmapbt(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
struct xfs_owner_info oinfo; struct xfs_owner_info oinfo;
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG); xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
return xfs_scrub_btree(sc, sc->sa.rmap_cur, xfs_scrub_rmapbt_rec, return xchk_btree(sc, sc->sa.rmap_cur, xchk_rmapbt_rec,
&oinfo, NULL); &oinfo, NULL);
} }
/* xref check that the extent is owned by a given owner */ /* xref check that the extent is owned by a given owner */
static inline void static inline void
xfs_scrub_xref_check_owner( xchk_xref_check_owner(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_agblock_t bno, xfs_agblock_t bno,
xfs_extlen_t len, xfs_extlen_t len,
@ -193,42 +193,42 @@ xfs_scrub_xref_check_owner(
bool has_rmap; bool has_rmap;
int error; int error;
if (!sc->sa.rmap_cur || xfs_scrub_skip_xref(sc->sm)) if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
return; return;
error = xfs_rmap_record_exists(sc->sa.rmap_cur, bno, len, oinfo, error = xfs_rmap_record_exists(sc->sa.rmap_cur, bno, len, oinfo,
&has_rmap); &has_rmap);
if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur)) if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
return; return;
if (has_rmap != should_have_rmap) if (has_rmap != should_have_rmap)
xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0); xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
} }
/* xref check that the extent is owned by a given owner */ /* xref check that the extent is owned by a given owner */
void void
xfs_scrub_xref_is_owned_by( xchk_xref_is_owned_by(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_agblock_t bno, xfs_agblock_t bno,
xfs_extlen_t len, xfs_extlen_t len,
struct xfs_owner_info *oinfo) struct xfs_owner_info *oinfo)
{ {
xfs_scrub_xref_check_owner(sc, bno, len, oinfo, true); xchk_xref_check_owner(sc, bno, len, oinfo, true);
} }
/* xref check that the extent is not owned by a given owner */ /* xref check that the extent is not owned by a given owner */
void void
xfs_scrub_xref_is_not_owned_by( xchk_xref_is_not_owned_by(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_agblock_t bno, xfs_agblock_t bno,
xfs_extlen_t len, xfs_extlen_t len,
struct xfs_owner_info *oinfo) struct xfs_owner_info *oinfo)
{ {
xfs_scrub_xref_check_owner(sc, bno, len, oinfo, false); xchk_xref_check_owner(sc, bno, len, oinfo, false);
} }
/* xref check that the extent has no reverse mapping at all */ /* xref check that the extent has no reverse mapping at all */
void void
xfs_scrub_xref_has_no_owner( xchk_xref_has_no_owner(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_agblock_t bno, xfs_agblock_t bno,
xfs_extlen_t len) xfs_extlen_t len)
@ -236,12 +236,12 @@ xfs_scrub_xref_has_no_owner(
bool has_rmap; bool has_rmap;
int error; int error;
if (!sc->sa.rmap_cur || xfs_scrub_skip_xref(sc->sm)) if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
return; return;
error = xfs_rmap_has_record(sc->sa.rmap_cur, bno, len, &has_rmap); error = xfs_rmap_has_record(sc->sa.rmap_cur, bno, len, &has_rmap);
if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur)) if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
return; return;
if (has_rmap) if (has_rmap)
xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0); xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
} }
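The ownership checks in this file all share one cross-reference shape: bail out if there is no rmap cursor or cross-referencing is being skipped, query the btree, and flag xref corruption when the answer differs from the expectation. A self-contained sketch of that shape follows; struct ctx, query_has_record() and xref_check() are stand-ins rather than kernel APIs, and the real code funnels the query error through xchk_should_check_xref().

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the scrub context. */
struct ctx {
	bool have_cursor;	/* models sc->sa.rmap_cur != NULL */
	bool skip_xref;		/* models xchk_skip_xref(sc->sm) */
	bool corrupt;
};

/* Stand-in for the rmapbt query. */
static int query_has_record(bool *has)
{
	*has = true;		/* pretend a reverse mapping exists here */
	return 0;
}

/* Generic cross-reference check: compare reality against expectation. */
static void xref_check(struct ctx *sc, bool should_exist)
{
	bool has;

	if (!sc->have_cursor || sc->skip_xref)
		return;		/* nothing to cross-reference against */

	if (query_has_record(&has))
		return;		/* error handling elided in this sketch */

	if (has != should_exist)
		sc->corrupt = true;
}

int main(void)
{
	struct ctx sc = { .have_cursor = true };

	xref_check(&sc, false);	/* expected no owner, but one exists */
	printf("cross-reference corrupt: %s\n", sc.corrupt ? "yes" : "no");
	return 0;
}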


@ -25,13 +25,13 @@
/* Set us up with the realtime metadata locked. */ /* Set us up with the realtime metadata locked. */
int int
xfs_scrub_setup_rt( xchk_setup_rt(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_inode *ip) struct xfs_inode *ip)
{ {
int error; int error;
error = xfs_scrub_setup_fs(sc, ip); error = xchk_setup_fs(sc, ip);
if (error) if (error)
return error; return error;
@ -46,7 +46,7 @@ xfs_scrub_setup_rt(
/* Scrub a free extent record from the realtime bitmap. */ /* Scrub a free extent record from the realtime bitmap. */
STATIC int STATIC int
xfs_scrub_rtbitmap_rec( xchk_rtbitmap_rec(
struct xfs_trans *tp, struct xfs_trans *tp,
struct xfs_rtalloc_rec *rec, struct xfs_rtalloc_rec *rec,
void *priv) void *priv)
@ -61,24 +61,24 @@ xfs_scrub_rtbitmap_rec(
if (startblock + blockcount <= startblock || if (startblock + blockcount <= startblock ||
!xfs_verify_rtbno(sc->mp, startblock) || !xfs_verify_rtbno(sc->mp, startblock) ||
!xfs_verify_rtbno(sc->mp, startblock + blockcount - 1)) !xfs_verify_rtbno(sc->mp, startblock + blockcount - 1))
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
return 0; return 0;
} }
/* Scrub the realtime bitmap. */ /* Scrub the realtime bitmap. */
int int
xfs_scrub_rtbitmap( xchk_rtbitmap(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
int error; int error;
/* Invoke the fork scrubber. */ /* Invoke the fork scrubber. */
error = xfs_scrub_metadata_inode_forks(sc); error = xchk_metadata_inode_forks(sc);
if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
return error; return error;
error = xfs_rtalloc_query_all(sc->tp, xfs_scrub_rtbitmap_rec, sc); error = xfs_rtalloc_query_all(sc->tp, xchk_rtbitmap_rec, sc);
if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, &error)) if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
goto out; goto out;
out: out:
@ -87,7 +87,7 @@ xfs_scrub_rtbitmap(
/* Scrub the realtime summary. */ /* Scrub the realtime summary. */
int int
xfs_scrub_rtsummary( xchk_rtsummary(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
struct xfs_inode *rsumip = sc->mp->m_rsumip; struct xfs_inode *rsumip = sc->mp->m_rsumip;
@ -107,12 +107,12 @@ xfs_scrub_rtsummary(
xfs_ilock(sc->ip, sc->ilock_flags); xfs_ilock(sc->ip, sc->ilock_flags);
/* Invoke the fork scrubber. */ /* Invoke the fork scrubber. */
error = xfs_scrub_metadata_inode_forks(sc); error = xchk_metadata_inode_forks(sc);
if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
goto out; goto out;
/* XXX: implement this some day */ /* XXX: implement this some day */
xfs_scrub_set_incomplete(sc); xchk_set_incomplete(sc);
out: out:
/* Switch back to the rtbitmap inode and lock flags. */ /* Switch back to the rtbitmap inode and lock flags. */
xfs_iunlock(sc->ip, sc->ilock_flags); xfs_iunlock(sc->ip, sc->ilock_flags);
@ -124,7 +124,7 @@ xfs_scrub_rtsummary(
/* xref check that the extent is not free in the rtbitmap */ /* xref check that the extent is not free in the rtbitmap */
void void
xfs_scrub_xref_is_used_rt_space( xchk_xref_is_used_rt_space(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
xfs_rtblock_t fsbno, xfs_rtblock_t fsbno,
xfs_extlen_t len) xfs_extlen_t len)
@ -135,7 +135,7 @@ xfs_scrub_xref_is_used_rt_space(
bool is_free; bool is_free;
int error; int error;
if (xfs_scrub_skip_xref(sc->sm)) if (xchk_skip_xref(sc->sm))
return; return;
startext = fsbno; startext = fsbno;
@ -147,10 +147,10 @@ xfs_scrub_xref_is_used_rt_space(
xfs_ilock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP); xfs_ilock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
error = xfs_rtalloc_extent_is_free(sc->mp, sc->tp, startext, extcount, error = xfs_rtalloc_extent_is_free(sc->mp, sc->tp, startext, extcount,
&is_free); &is_free);
if (!xfs_scrub_should_check_xref(sc, &error, NULL)) if (!xchk_should_check_xref(sc, &error, NULL))
goto out_unlock; goto out_unlock;
if (is_free) if (is_free)
xfs_scrub_ino_xref_set_corrupt(sc, sc->mp->m_rbmip->i_ino); xchk_ino_xref_set_corrupt(sc, sc->mp->m_rbmip->i_ino);
out_unlock: out_unlock:
xfs_iunlock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP); xfs_iunlock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
} }


@ -131,6 +131,12 @@
* optimize the structure so that the rebuild knows what to do. The * optimize the structure so that the rebuild knows what to do. The
* second check evaluates the completeness of the repair; that is what * second check evaluates the completeness of the repair; that is what
* is reported to userspace. * is reported to userspace.
*
* A quick note on symbol prefixes:
* - "xfs_" are general XFS symbols.
* - "xchk_" are symbols related to metadata checking.
* - "xrep_" are symbols related to metadata repair.
* - "xfs_scrub_" are symbols that tie online fsck to the rest of XFS.
*/ */
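In terms of declarations, the convention looks roughly like the listing below: checkers take the xchk_ prefix, repair helpers still carry xfs_repair_ in this patch (the note implies an xrep_ rename later), and the ioctl-facing glue keeps the long prefix. This is a declarations-only sketch assembled from symbols in this series, not a verbatim header excerpt.

/* Forward declarations so the prototypes below stand alone. */
struct xfs_scrub_context;
struct xfs_scrub_metadata;
struct xfs_inode;

/* "xchk_": pure metadata checking. */
int xchk_superblock(struct xfs_scrub_context *sc);
int xchk_parent(struct xfs_scrub_context *sc);

/* "xfs_repair_": repair helpers (the note above reserves "xrep_" for these). */
int xfs_repair_probe(struct xfs_scrub_context *sc);

/* "xfs_scrub_": glue that ties online fsck to the rest of XFS. */
int xfs_scrub_metadata(struct xfs_inode *ip, struct xfs_scrub_metadata *sm);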
/* /*
@ -144,12 +150,12 @@
* supported by the running kernel. * supported by the running kernel.
*/ */
static int static int
xfs_scrub_probe( xchk_probe(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
int error = 0; int error = 0;
if (xfs_scrub_should_terminate(sc, &error)) if (xchk_should_terminate(sc, &error))
return error; return error;
return 0; return 0;
@ -159,12 +165,12 @@ xfs_scrub_probe(
/* Free all the resources and finish the transactions. */ /* Free all the resources and finish the transactions. */
STATIC int STATIC int
xfs_scrub_teardown( xchk_teardown(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_inode *ip_in, struct xfs_inode *ip_in,
int error) int error)
{ {
xfs_scrub_ag_free(sc, &sc->sa); xchk_ag_free(sc, &sc->sa);
if (sc->tp) { if (sc->tp) {
if (error == 0 && (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)) if (error == 0 && (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR))
error = xfs_trans_commit(sc->tp); error = xfs_trans_commit(sc->tp);
@ -191,165 +197,165 @@ xfs_scrub_teardown(
/* Scrubbing dispatch. */ /* Scrubbing dispatch. */
static const struct xfs_scrub_meta_ops meta_scrub_ops[] = { static const struct xchk_meta_ops meta_scrub_ops[] = {
[XFS_SCRUB_TYPE_PROBE] = { /* ioctl presence test */ [XFS_SCRUB_TYPE_PROBE] = { /* ioctl presence test */
.type = ST_NONE, .type = ST_NONE,
.setup = xfs_scrub_setup_fs, .setup = xchk_setup_fs,
.scrub = xfs_scrub_probe, .scrub = xchk_probe,
.repair = xfs_repair_probe, .repair = xfs_repair_probe,
}, },
[XFS_SCRUB_TYPE_SB] = { /* superblock */ [XFS_SCRUB_TYPE_SB] = { /* superblock */
.type = ST_PERAG, .type = ST_PERAG,
.setup = xfs_scrub_setup_fs, .setup = xchk_setup_fs,
.scrub = xfs_scrub_superblock, .scrub = xchk_superblock,
.repair = xfs_repair_superblock, .repair = xfs_repair_superblock,
}, },
[XFS_SCRUB_TYPE_AGF] = { /* agf */ [XFS_SCRUB_TYPE_AGF] = { /* agf */
.type = ST_PERAG, .type = ST_PERAG,
.setup = xfs_scrub_setup_fs, .setup = xchk_setup_fs,
.scrub = xfs_scrub_agf, .scrub = xchk_agf,
.repair = xfs_repair_notsupported, .repair = xfs_repair_notsupported,
}, },
[XFS_SCRUB_TYPE_AGFL]= { /* agfl */ [XFS_SCRUB_TYPE_AGFL]= { /* agfl */
.type = ST_PERAG, .type = ST_PERAG,
.setup = xfs_scrub_setup_fs, .setup = xchk_setup_fs,
.scrub = xfs_scrub_agfl, .scrub = xchk_agfl,
.repair = xfs_repair_notsupported, .repair = xfs_repair_notsupported,
}, },
[XFS_SCRUB_TYPE_AGI] = { /* agi */ [XFS_SCRUB_TYPE_AGI] = { /* agi */
.type = ST_PERAG, .type = ST_PERAG,
.setup = xfs_scrub_setup_fs, .setup = xchk_setup_fs,
.scrub = xfs_scrub_agi, .scrub = xchk_agi,
.repair = xfs_repair_notsupported, .repair = xfs_repair_notsupported,
}, },
[XFS_SCRUB_TYPE_BNOBT] = { /* bnobt */ [XFS_SCRUB_TYPE_BNOBT] = { /* bnobt */
.type = ST_PERAG, .type = ST_PERAG,
.setup = xfs_scrub_setup_ag_allocbt, .setup = xchk_setup_ag_allocbt,
.scrub = xfs_scrub_bnobt, .scrub = xchk_bnobt,
.repair = xfs_repair_notsupported, .repair = xfs_repair_notsupported,
}, },
[XFS_SCRUB_TYPE_CNTBT] = { /* cntbt */ [XFS_SCRUB_TYPE_CNTBT] = { /* cntbt */
.type = ST_PERAG, .type = ST_PERAG,
.setup = xfs_scrub_setup_ag_allocbt, .setup = xchk_setup_ag_allocbt,
.scrub = xfs_scrub_cntbt, .scrub = xchk_cntbt,
.repair = xfs_repair_notsupported, .repair = xfs_repair_notsupported,
}, },
[XFS_SCRUB_TYPE_INOBT] = { /* inobt */ [XFS_SCRUB_TYPE_INOBT] = { /* inobt */
.type = ST_PERAG, .type = ST_PERAG,
.setup = xfs_scrub_setup_ag_iallocbt, .setup = xchk_setup_ag_iallocbt,
.scrub = xfs_scrub_inobt, .scrub = xchk_inobt,
.repair = xfs_repair_notsupported, .repair = xfs_repair_notsupported,
}, },
[XFS_SCRUB_TYPE_FINOBT] = { /* finobt */ [XFS_SCRUB_TYPE_FINOBT] = { /* finobt */
.type = ST_PERAG, .type = ST_PERAG,
.setup = xfs_scrub_setup_ag_iallocbt, .setup = xchk_setup_ag_iallocbt,
.scrub = xfs_scrub_finobt, .scrub = xchk_finobt,
.has = xfs_sb_version_hasfinobt, .has = xfs_sb_version_hasfinobt,
.repair = xfs_repair_notsupported, .repair = xfs_repair_notsupported,
}, },
[XFS_SCRUB_TYPE_RMAPBT] = { /* rmapbt */ [XFS_SCRUB_TYPE_RMAPBT] = { /* rmapbt */
.type = ST_PERAG, .type = ST_PERAG,
.setup = xfs_scrub_setup_ag_rmapbt, .setup = xchk_setup_ag_rmapbt,
.scrub = xfs_scrub_rmapbt, .scrub = xchk_rmapbt,
.has = xfs_sb_version_hasrmapbt, .has = xfs_sb_version_hasrmapbt,
.repair = xfs_repair_notsupported, .repair = xfs_repair_notsupported,
}, },
[XFS_SCRUB_TYPE_REFCNTBT] = { /* refcountbt */ [XFS_SCRUB_TYPE_REFCNTBT] = { /* refcountbt */
.type = ST_PERAG, .type = ST_PERAG,
.setup = xfs_scrub_setup_ag_refcountbt, .setup = xchk_setup_ag_refcountbt,
.scrub = xfs_scrub_refcountbt, .scrub = xchk_refcountbt,
.has = xfs_sb_version_hasreflink, .has = xfs_sb_version_hasreflink,
.repair = xfs_repair_notsupported, .repair = xfs_repair_notsupported,
}, },
[XFS_SCRUB_TYPE_INODE] = { /* inode record */ [XFS_SCRUB_TYPE_INODE] = { /* inode record */
.type = ST_INODE, .type = ST_INODE,
.setup = xfs_scrub_setup_inode, .setup = xchk_setup_inode,
.scrub = xfs_scrub_inode, .scrub = xchk_inode,
.repair = xfs_repair_notsupported, .repair = xfs_repair_notsupported,
}, },
[XFS_SCRUB_TYPE_BMBTD] = { /* inode data fork */ [XFS_SCRUB_TYPE_BMBTD] = { /* inode data fork */
.type = ST_INODE, .type = ST_INODE,
.setup = xfs_scrub_setup_inode_bmap, .setup = xchk_setup_inode_bmap,
.scrub = xfs_scrub_bmap_data, .scrub = xchk_bmap_data,
.repair = xfs_repair_notsupported, .repair = xfs_repair_notsupported,
}, },
[XFS_SCRUB_TYPE_BMBTA] = { /* inode attr fork */ [XFS_SCRUB_TYPE_BMBTA] = { /* inode attr fork */
.type = ST_INODE, .type = ST_INODE,
.setup = xfs_scrub_setup_inode_bmap, .setup = xchk_setup_inode_bmap,
.scrub = xfs_scrub_bmap_attr, .scrub = xchk_bmap_attr,
.repair = xfs_repair_notsupported, .repair = xfs_repair_notsupported,
}, },
[XFS_SCRUB_TYPE_BMBTC] = { /* inode CoW fork */ [XFS_SCRUB_TYPE_BMBTC] = { /* inode CoW fork */
.type = ST_INODE, .type = ST_INODE,
.setup = xfs_scrub_setup_inode_bmap, .setup = xchk_setup_inode_bmap,
.scrub = xfs_scrub_bmap_cow, .scrub = xchk_bmap_cow,
.repair = xfs_repair_notsupported, .repair = xfs_repair_notsupported,
}, },
[XFS_SCRUB_TYPE_DIR] = { /* directory */ [XFS_SCRUB_TYPE_DIR] = { /* directory */
.type = ST_INODE, .type = ST_INODE,
.setup = xfs_scrub_setup_directory, .setup = xchk_setup_directory,
.scrub = xfs_scrub_directory, .scrub = xchk_directory,
.repair = xfs_repair_notsupported, .repair = xfs_repair_notsupported,
}, },
[XFS_SCRUB_TYPE_XATTR] = { /* extended attributes */ [XFS_SCRUB_TYPE_XATTR] = { /* extended attributes */
.type = ST_INODE, .type = ST_INODE,
.setup = xfs_scrub_setup_xattr, .setup = xchk_setup_xattr,
.scrub = xfs_scrub_xattr, .scrub = xchk_xattr,
.repair = xfs_repair_notsupported, .repair = xfs_repair_notsupported,
}, },
[XFS_SCRUB_TYPE_SYMLINK] = { /* symbolic link */ [XFS_SCRUB_TYPE_SYMLINK] = { /* symbolic link */
.type = ST_INODE, .type = ST_INODE,
.setup = xfs_scrub_setup_symlink, .setup = xchk_setup_symlink,
.scrub = xfs_scrub_symlink, .scrub = xchk_symlink,
.repair = xfs_repair_notsupported, .repair = xfs_repair_notsupported,
}, },
[XFS_SCRUB_TYPE_PARENT] = { /* parent pointers */ [XFS_SCRUB_TYPE_PARENT] = { /* parent pointers */
.type = ST_INODE, .type = ST_INODE,
.setup = xfs_scrub_setup_parent, .setup = xchk_setup_parent,
.scrub = xfs_scrub_parent, .scrub = xchk_parent,
.repair = xfs_repair_notsupported, .repair = xfs_repair_notsupported,
}, },
[XFS_SCRUB_TYPE_RTBITMAP] = { /* realtime bitmap */ [XFS_SCRUB_TYPE_RTBITMAP] = { /* realtime bitmap */
.type = ST_FS, .type = ST_FS,
.setup = xfs_scrub_setup_rt, .setup = xchk_setup_rt,
.scrub = xfs_scrub_rtbitmap, .scrub = xchk_rtbitmap,
.has = xfs_sb_version_hasrealtime, .has = xfs_sb_version_hasrealtime,
.repair = xfs_repair_notsupported, .repair = xfs_repair_notsupported,
}, },
[XFS_SCRUB_TYPE_RTSUM] = { /* realtime summary */ [XFS_SCRUB_TYPE_RTSUM] = { /* realtime summary */
.type = ST_FS, .type = ST_FS,
.setup = xfs_scrub_setup_rt, .setup = xchk_setup_rt,
.scrub = xfs_scrub_rtsummary, .scrub = xchk_rtsummary,
.has = xfs_sb_version_hasrealtime, .has = xfs_sb_version_hasrealtime,
.repair = xfs_repair_notsupported, .repair = xfs_repair_notsupported,
}, },
[XFS_SCRUB_TYPE_UQUOTA] = { /* user quota */ [XFS_SCRUB_TYPE_UQUOTA] = { /* user quota */
.type = ST_FS, .type = ST_FS,
.setup = xfs_scrub_setup_quota, .setup = xchk_setup_quota,
.scrub = xfs_scrub_quota, .scrub = xchk_quota,
.repair = xfs_repair_notsupported, .repair = xfs_repair_notsupported,
}, },
[XFS_SCRUB_TYPE_GQUOTA] = { /* group quota */ [XFS_SCRUB_TYPE_GQUOTA] = { /* group quota */
.type = ST_FS, .type = ST_FS,
.setup = xfs_scrub_setup_quota, .setup = xchk_setup_quota,
.scrub = xfs_scrub_quota, .scrub = xchk_quota,
.repair = xfs_repair_notsupported, .repair = xfs_repair_notsupported,
}, },
[XFS_SCRUB_TYPE_PQUOTA] = { /* project quota */ [XFS_SCRUB_TYPE_PQUOTA] = { /* project quota */
.type = ST_FS, .type = ST_FS,
.setup = xfs_scrub_setup_quota, .setup = xchk_setup_quota,
.scrub = xfs_scrub_quota, .scrub = xchk_quota,
.repair = xfs_repair_notsupported, .repair = xfs_repair_notsupported,
}, },
}; };
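The table above is the entire dispatch mechanism: xfs_scrub_metadata() validates sm->sm_type, indexes into meta_scrub_ops, and calls through the ->setup and ->scrub pointers. Below is a compact, self-contained sketch of the same table-driven dispatch; the context type, ops names and scrub types are stand-ins, not the kernel structures.

#include <errno.h>
#include <stdio.h>

struct ctx;				/* opaque stand-in for the scrub context */

struct meta_ops {
	int (*setup)(struct ctx *);
	int (*scrub)(struct ctx *);
};

static int setup_fs(struct ctx *sc)    { (void)sc; return 0; }
static int scrub_probe(struct ctx *sc) { (void)sc; return 0; }
static int scrub_sb(struct ctx *sc)    { (void)sc; return 0; }

enum { TYPE_PROBE, TYPE_SB, TYPE_NR };

static const struct meta_ops ops_table[TYPE_NR] = {
	[TYPE_PROBE] = { .setup = setup_fs, .scrub = scrub_probe },
	[TYPE_SB]    = { .setup = setup_fs, .scrub = scrub_sb },
};

/* Table-driven dispatch: validate the type, then call through the ops. */
static int do_scrub(struct ctx *sc, unsigned int type)
{
	const struct meta_ops *ops;
	int error;

	if (type >= TYPE_NR)
		return -EINVAL;
	ops = &ops_table[type];

	error = ops->setup(sc);
	if (error)
		return error;
	return ops->scrub(sc);
}

int main(void)
{
	printf("probe: %d\n", do_scrub(NULL, TYPE_PROBE));
	printf("bogus: %d\n", do_scrub(NULL, 42));
	return 0;
}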
/* This isn't a stable feature, warn once per day. */ /* This isn't a stable feature, warn once per day. */
static inline void static inline void
xfs_scrub_experimental_warning( xchk_experimental_warning(
struct xfs_mount *mp) struct xfs_mount *mp)
{ {
static struct ratelimit_state scrub_warning = RATELIMIT_STATE_INIT( static struct ratelimit_state scrub_warning = RATELIMIT_STATE_INIT(
"xfs_scrub_warning", 86400 * HZ, 1); "xchk_warning", 86400 * HZ, 1);
ratelimit_set_flags(&scrub_warning, RATELIMIT_MSG_ON_RELEASE); ratelimit_set_flags(&scrub_warning, RATELIMIT_MSG_ON_RELEASE);
if (__ratelimit(&scrub_warning)) if (__ratelimit(&scrub_warning))
@ -358,12 +364,12 @@ xfs_scrub_experimental_warning(
} }
static int static int
xfs_scrub_validate_inputs( xchk_validate_inputs(
struct xfs_mount *mp, struct xfs_mount *mp,
struct xfs_scrub_metadata *sm) struct xfs_scrub_metadata *sm)
{ {
int error; int error;
const struct xfs_scrub_meta_ops *ops; const struct xchk_meta_ops *ops;
error = -EINVAL; error = -EINVAL;
/* Check our inputs. */ /* Check our inputs. */
@ -441,7 +447,7 @@ xfs_scrub_validate_inputs(
} }
#ifdef CONFIG_XFS_ONLINE_REPAIR #ifdef CONFIG_XFS_ONLINE_REPAIR
static inline void xfs_scrub_postmortem(struct xfs_scrub_context *sc) static inline void xchk_postmortem(struct xfs_scrub_context *sc)
{ {
/* /*
* Userspace asked us to repair something, we repaired it, rescanned * Userspace asked us to repair something, we repaired it, rescanned
@ -454,7 +460,7 @@ static inline void xfs_scrub_postmortem(struct xfs_scrub_context *sc)
xfs_repair_failure(sc->mp); xfs_repair_failure(sc->mp);
} }
#else #else
static inline void xfs_scrub_postmortem(struct xfs_scrub_context *sc) static inline void xchk_postmortem(struct xfs_scrub_context *sc)
{ {
/* /*
* Userspace asked us to scrub something, it's broken, and we have no * Userspace asked us to scrub something, it's broken, and we have no
@ -480,9 +486,9 @@ xfs_scrub_metadata(
int error = 0; int error = 0;
BUILD_BUG_ON(sizeof(meta_scrub_ops) != BUILD_BUG_ON(sizeof(meta_scrub_ops) !=
(sizeof(struct xfs_scrub_meta_ops) * XFS_SCRUB_TYPE_NR)); (sizeof(struct xchk_meta_ops) * XFS_SCRUB_TYPE_NR));
trace_xfs_scrub_start(ip, sm, error); trace_xchk_start(ip, sm, error);
/* Forbidden if we are shut down or mounted norecovery. */ /* Forbidden if we are shut down or mounted norecovery. */
error = -ESHUTDOWN; error = -ESHUTDOWN;
@ -492,11 +498,11 @@ xfs_scrub_metadata(
if (mp->m_flags & XFS_MOUNT_NORECOVERY) if (mp->m_flags & XFS_MOUNT_NORECOVERY)
goto out; goto out;
error = xfs_scrub_validate_inputs(mp, sm); error = xchk_validate_inputs(mp, sm);
if (error) if (error)
goto out; goto out;
xfs_scrub_experimental_warning(mp); xchk_experimental_warning(mp);
retry_op: retry_op:
/* Set up for the operation. */ /* Set up for the operation. */
@ -518,7 +524,7 @@ xfs_scrub_metadata(
* Tear down everything we hold, then set up again with * Tear down everything we hold, then set up again with
* preparation for worst-case scenarios. * preparation for worst-case scenarios.
*/ */
error = xfs_scrub_teardown(&sc, ip, 0); error = xchk_teardown(&sc, ip, 0);
if (error) if (error)
goto out; goto out;
try_harder = true; try_harder = true;
@ -553,7 +559,7 @@ xfs_scrub_metadata(
if (error == -EAGAIN) { if (error == -EAGAIN) {
if (sc.try_harder) if (sc.try_harder)
try_harder = true; try_harder = true;
error = xfs_scrub_teardown(&sc, ip, 0); error = xchk_teardown(&sc, ip, 0);
if (error) { if (error) {
xfs_repair_failure(mp); xfs_repair_failure(mp);
goto out; goto out;
@ -563,11 +569,11 @@ xfs_scrub_metadata(
} }
out_nofix: out_nofix:
xfs_scrub_postmortem(&sc); xchk_postmortem(&sc);
out_teardown: out_teardown:
error = xfs_scrub_teardown(&sc, ip, error); error = xchk_teardown(&sc, ip, error);
out: out:
trace_xfs_scrub_done(ip, sm, error); trace_xchk_done(ip, sm, error);
if (error == -EFSCORRUPTED || error == -EFSBADCRC) { if (error == -EFSCORRUPTED || error == -EFSBADCRC) {
sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT; sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
error = 0; error = 0;


@ -9,14 +9,14 @@
struct xfs_scrub_context; struct xfs_scrub_context;
/* Type info and names for the scrub types. */ /* Type info and names for the scrub types. */
enum xfs_scrub_type { enum xchk_type {
ST_NONE = 1, /* disabled */ ST_NONE = 1, /* disabled */
ST_PERAG, /* per-AG metadata */ ST_PERAG, /* per-AG metadata */
ST_FS, /* per-FS metadata */ ST_FS, /* per-FS metadata */
ST_INODE, /* per-inode metadata */ ST_INODE, /* per-inode metadata */
}; };
struct xfs_scrub_meta_ops { struct xchk_meta_ops {
/* Acquire whatever resources are needed for the operation. */ /* Acquire whatever resources are needed for the operation. */
int (*setup)(struct xfs_scrub_context *, int (*setup)(struct xfs_scrub_context *,
struct xfs_inode *); struct xfs_inode *);
@ -31,11 +31,11 @@ struct xfs_scrub_meta_ops {
bool (*has)(struct xfs_sb *); bool (*has)(struct xfs_sb *);
/* type describing required/allowed inputs */ /* type describing required/allowed inputs */
enum xfs_scrub_type type; enum xchk_type type;
}; };
/* Buffer pointers and btree cursors for an entire AG. */ /* Buffer pointers and btree cursors for an entire AG. */
struct xfs_scrub_ag { struct xchk_ag {
xfs_agnumber_t agno; xfs_agnumber_t agno;
struct xfs_perag *pag; struct xfs_perag *pag;
@ -57,7 +57,7 @@ struct xfs_scrub_context {
/* General scrub state. */ /* General scrub state. */
struct xfs_mount *mp; struct xfs_mount *mp;
struct xfs_scrub_metadata *sm; struct xfs_scrub_metadata *sm;
const struct xfs_scrub_meta_ops *ops; const struct xchk_meta_ops *ops;
struct xfs_trans *tp; struct xfs_trans *tp;
struct xfs_inode *ip; struct xfs_inode *ip;
void *buf; void *buf;
@ -66,78 +66,78 @@ struct xfs_scrub_context {
bool has_quotaofflock; bool has_quotaofflock;
/* State tracking for single-AG operations. */ /* State tracking for single-AG operations. */
struct xfs_scrub_ag sa; struct xchk_ag sa;
}; };
/* Metadata scrubbers */ /* Metadata scrubbers */
int xfs_scrub_tester(struct xfs_scrub_context *sc); int xchk_tester(struct xfs_scrub_context *sc);
int xfs_scrub_superblock(struct xfs_scrub_context *sc); int xchk_superblock(struct xfs_scrub_context *sc);
int xfs_scrub_agf(struct xfs_scrub_context *sc); int xchk_agf(struct xfs_scrub_context *sc);
int xfs_scrub_agfl(struct xfs_scrub_context *sc); int xchk_agfl(struct xfs_scrub_context *sc);
int xfs_scrub_agi(struct xfs_scrub_context *sc); int xchk_agi(struct xfs_scrub_context *sc);
int xfs_scrub_bnobt(struct xfs_scrub_context *sc); int xchk_bnobt(struct xfs_scrub_context *sc);
int xfs_scrub_cntbt(struct xfs_scrub_context *sc); int xchk_cntbt(struct xfs_scrub_context *sc);
int xfs_scrub_inobt(struct xfs_scrub_context *sc); int xchk_inobt(struct xfs_scrub_context *sc);
int xfs_scrub_finobt(struct xfs_scrub_context *sc); int xchk_finobt(struct xfs_scrub_context *sc);
int xfs_scrub_rmapbt(struct xfs_scrub_context *sc); int xchk_rmapbt(struct xfs_scrub_context *sc);
int xfs_scrub_refcountbt(struct xfs_scrub_context *sc); int xchk_refcountbt(struct xfs_scrub_context *sc);
int xfs_scrub_inode(struct xfs_scrub_context *sc); int xchk_inode(struct xfs_scrub_context *sc);
int xfs_scrub_bmap_data(struct xfs_scrub_context *sc); int xchk_bmap_data(struct xfs_scrub_context *sc);
int xfs_scrub_bmap_attr(struct xfs_scrub_context *sc); int xchk_bmap_attr(struct xfs_scrub_context *sc);
int xfs_scrub_bmap_cow(struct xfs_scrub_context *sc); int xchk_bmap_cow(struct xfs_scrub_context *sc);
int xfs_scrub_directory(struct xfs_scrub_context *sc); int xchk_directory(struct xfs_scrub_context *sc);
int xfs_scrub_xattr(struct xfs_scrub_context *sc); int xchk_xattr(struct xfs_scrub_context *sc);
int xfs_scrub_symlink(struct xfs_scrub_context *sc); int xchk_symlink(struct xfs_scrub_context *sc);
int xfs_scrub_parent(struct xfs_scrub_context *sc); int xchk_parent(struct xfs_scrub_context *sc);
#ifdef CONFIG_XFS_RT #ifdef CONFIG_XFS_RT
int xfs_scrub_rtbitmap(struct xfs_scrub_context *sc); int xchk_rtbitmap(struct xfs_scrub_context *sc);
int xfs_scrub_rtsummary(struct xfs_scrub_context *sc); int xchk_rtsummary(struct xfs_scrub_context *sc);
#else #else
static inline int static inline int
xfs_scrub_rtbitmap(struct xfs_scrub_context *sc) xchk_rtbitmap(struct xfs_scrub_context *sc)
{ {
return -ENOENT; return -ENOENT;
} }
static inline int static inline int
xfs_scrub_rtsummary(struct xfs_scrub_context *sc) xchk_rtsummary(struct xfs_scrub_context *sc)
{ {
return -ENOENT; return -ENOENT;
} }
#endif #endif
#ifdef CONFIG_XFS_QUOTA #ifdef CONFIG_XFS_QUOTA
int xfs_scrub_quota(struct xfs_scrub_context *sc); int xchk_quota(struct xfs_scrub_context *sc);
#else #else
static inline int static inline int
xfs_scrub_quota(struct xfs_scrub_context *sc) xchk_quota(struct xfs_scrub_context *sc)
{ {
return -ENOENT; return -ENOENT;
} }
#endif #endif
/* cross-referencing helpers */ /* cross-referencing helpers */
void xfs_scrub_xref_is_used_space(struct xfs_scrub_context *sc, void xchk_xref_is_used_space(struct xfs_scrub_context *sc,
xfs_agblock_t agbno, xfs_extlen_t len); xfs_agblock_t agbno, xfs_extlen_t len);
void xfs_scrub_xref_is_not_inode_chunk(struct xfs_scrub_context *sc, void xchk_xref_is_not_inode_chunk(struct xfs_scrub_context *sc,
xfs_agblock_t agbno, xfs_extlen_t len); xfs_agblock_t agbno, xfs_extlen_t len);
void xfs_scrub_xref_is_inode_chunk(struct xfs_scrub_context *sc, void xchk_xref_is_inode_chunk(struct xfs_scrub_context *sc,
xfs_agblock_t agbno, xfs_extlen_t len); xfs_agblock_t agbno, xfs_extlen_t len);
void xfs_scrub_xref_is_owned_by(struct xfs_scrub_context *sc, void xchk_xref_is_owned_by(struct xfs_scrub_context *sc,
xfs_agblock_t agbno, xfs_extlen_t len, xfs_agblock_t agbno, xfs_extlen_t len,
struct xfs_owner_info *oinfo); struct xfs_owner_info *oinfo);
void xfs_scrub_xref_is_not_owned_by(struct xfs_scrub_context *sc, void xchk_xref_is_not_owned_by(struct xfs_scrub_context *sc,
xfs_agblock_t agbno, xfs_extlen_t len, xfs_agblock_t agbno, xfs_extlen_t len,
struct xfs_owner_info *oinfo); struct xfs_owner_info *oinfo);
void xfs_scrub_xref_has_no_owner(struct xfs_scrub_context *sc, void xchk_xref_has_no_owner(struct xfs_scrub_context *sc,
xfs_agblock_t agbno, xfs_extlen_t len); xfs_agblock_t agbno, xfs_extlen_t len);
void xfs_scrub_xref_is_cow_staging(struct xfs_scrub_context *sc, void xchk_xref_is_cow_staging(struct xfs_scrub_context *sc,
xfs_agblock_t bno, xfs_extlen_t len); xfs_agblock_t bno, xfs_extlen_t len);
void xfs_scrub_xref_is_not_shared(struct xfs_scrub_context *sc, void xchk_xref_is_not_shared(struct xfs_scrub_context *sc,
xfs_agblock_t bno, xfs_extlen_t len); xfs_agblock_t bno, xfs_extlen_t len);
#ifdef CONFIG_XFS_RT #ifdef CONFIG_XFS_RT
void xfs_scrub_xref_is_used_rt_space(struct xfs_scrub_context *sc, void xchk_xref_is_used_rt_space(struct xfs_scrub_context *sc,
xfs_rtblock_t rtbno, xfs_extlen_t len); xfs_rtblock_t rtbno, xfs_extlen_t len);
#else #else
# define xfs_scrub_xref_is_used_rt_space(sc, rtbno, len) do { } while (0) # define xchk_xref_is_used_rt_space(sc, rtbno, len) do { } while (0)
#endif #endif
#endif /* __XFS_SCRUB_SCRUB_H__ */ #endif /* __XFS_SCRUB_SCRUB_H__ */
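
For orientation, a minimal sketch of how one of the renamed scrubbers could be wired into the meta_scrub_ops dispatch table that the BUILD_BUG_ON above sizes against XFS_SCRUB_TYPE_NR. Only the ->setup, ->has, and ->type members are visible in this hunk; the ->scrub member name and the XFS_SCRUB_TYPE_SYMLINK index are assumptions used purely for illustration, not lines from this patch.

	/* Hypothetical sketch of one dispatch-table entry after the rename.
	 * The ->scrub member and the XFS_SCRUB_TYPE_SYMLINK index are
	 * assumed; they are elided from the hunks shown in this diff.
	 */
	static const struct xchk_meta_ops meta_scrub_ops[] = {
		[XFS_SCRUB_TYPE_SYMLINK] = {	/* assumed index */
			.type	= ST_INODE,		/* per-inode metadata */
			.setup	= xchk_setup_symlink,
			.scrub	= xchk_symlink,		/* assumed member name */
		},
	};

Because the table is indexed by sm->sm_type, the rename only touches the callback identifiers; the dispatch logic in xfs_scrub_metadata() is unchanged.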

View File

@ -25,7 +25,7 @@
/* Set us up to scrub a symbolic link. */ /* Set us up to scrub a symbolic link. */
int int
xfs_scrub_setup_symlink( xchk_setup_symlink(
struct xfs_scrub_context *sc, struct xfs_scrub_context *sc,
struct xfs_inode *ip) struct xfs_inode *ip)
{ {
@ -34,13 +34,13 @@ xfs_scrub_setup_symlink(
if (!sc->buf) if (!sc->buf)
return -ENOMEM; return -ENOMEM;
return xfs_scrub_setup_inode_contents(sc, ip, 0); return xchk_setup_inode_contents(sc, ip, 0);
} }
/* Symbolic links. */ /* Symbolic links. */
int int
xfs_scrub_symlink( xchk_symlink(
struct xfs_scrub_context *sc) struct xfs_scrub_context *sc)
{ {
struct xfs_inode *ip = sc->ip; struct xfs_inode *ip = sc->ip;
@ -55,7 +55,7 @@ xfs_scrub_symlink(
/* Plausible size? */ /* Plausible size? */
if (len > XFS_SYMLINK_MAXLEN || len <= 0) { if (len > XFS_SYMLINK_MAXLEN || len <= 0) {
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out; goto out;
} }
@ -63,16 +63,16 @@ xfs_scrub_symlink(
if (ifp->if_flags & XFS_IFINLINE) { if (ifp->if_flags & XFS_IFINLINE) {
if (len > XFS_IFORK_DSIZE(ip) || if (len > XFS_IFORK_DSIZE(ip) ||
len > strnlen(ifp->if_u1.if_data, XFS_IFORK_DSIZE(ip))) len > strnlen(ifp->if_u1.if_data, XFS_IFORK_DSIZE(ip)))
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out; goto out;
} }
/* Remote symlink; must read the contents. */ /* Remote symlink; must read the contents. */
error = xfs_readlink_bmap_ilocked(sc->ip, sc->buf); error = xfs_readlink_bmap_ilocked(sc->ip, sc->buf);
if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, &error)) if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
goto out; goto out;
if (strnlen(sc->buf, XFS_SYMLINK_MAXLEN) < len) if (strnlen(sc->buf, XFS_SYMLINK_MAXLEN) < len)
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
out: out:
return error; return error;
} }

View File

@ -22,7 +22,7 @@
/* Figure out which block the btree cursor was pointing to. */ /* Figure out which block the btree cursor was pointing to. */
static inline xfs_fsblock_t static inline xfs_fsblock_t
xfs_scrub_btree_cur_fsbno( xchk_btree_cur_fsbno(
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
int level) int level)
{ {

View File

@ -12,7 +12,7 @@
#include <linux/tracepoint.h> #include <linux/tracepoint.h>
#include "xfs_bit.h" #include "xfs_bit.h"
DECLARE_EVENT_CLASS(xfs_scrub_class, DECLARE_EVENT_CLASS(xchk_class,
TP_PROTO(struct xfs_inode *ip, struct xfs_scrub_metadata *sm, TP_PROTO(struct xfs_inode *ip, struct xfs_scrub_metadata *sm,
int error), int error),
TP_ARGS(ip, sm, error), TP_ARGS(ip, sm, error),
@ -47,18 +47,18 @@ DECLARE_EVENT_CLASS(xfs_scrub_class,
__entry->error) __entry->error)
) )
#define DEFINE_SCRUB_EVENT(name) \ #define DEFINE_SCRUB_EVENT(name) \
DEFINE_EVENT(xfs_scrub_class, name, \ DEFINE_EVENT(xchk_class, name, \
TP_PROTO(struct xfs_inode *ip, struct xfs_scrub_metadata *sm, \ TP_PROTO(struct xfs_inode *ip, struct xfs_scrub_metadata *sm, \
int error), \ int error), \
TP_ARGS(ip, sm, error)) TP_ARGS(ip, sm, error))
DEFINE_SCRUB_EVENT(xfs_scrub_start); DEFINE_SCRUB_EVENT(xchk_start);
DEFINE_SCRUB_EVENT(xfs_scrub_done); DEFINE_SCRUB_EVENT(xchk_done);
DEFINE_SCRUB_EVENT(xfs_scrub_deadlock_retry); DEFINE_SCRUB_EVENT(xchk_deadlock_retry);
DEFINE_SCRUB_EVENT(xfs_repair_attempt); DEFINE_SCRUB_EVENT(xfs_repair_attempt);
DEFINE_SCRUB_EVENT(xfs_repair_done); DEFINE_SCRUB_EVENT(xfs_repair_done);
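
As a quick illustration of what the renamed event class buys: each DEFINE_SCRUB_EVENT(name) above expands to a DEFINE_EVENT() that generates a trace_<name>() helper with the class's TP_PROTO signature, so call sites only change their prefix. A minimal sketch of such a call site, assuming the usual kernel tracepoint wiring (the surrounding locals are illustrative):

	/* Sketch of a call site for one of the renamed events; it takes the
	 * (ip, sm, error) arguments declared in TP_PROTO of xchk_class.
	 */
	trace_xchk_deadlock_retry(ip, sm, error);

Since the TP_STRUCT__entry/TP_printk boilerplate lives in the class, renaming the class and event names leaves each call site a one-token change.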
TRACE_EVENT(xfs_scrub_op_error, TRACE_EVENT(xchk_op_error,
TP_PROTO(struct xfs_scrub_context *sc, xfs_agnumber_t agno, TP_PROTO(struct xfs_scrub_context *sc, xfs_agnumber_t agno,
xfs_agblock_t bno, int error, void *ret_ip), xfs_agblock_t bno, int error, void *ret_ip),
TP_ARGS(sc, agno, bno, error, ret_ip), TP_ARGS(sc, agno, bno, error, ret_ip),
@ -87,7 +87,7 @@ TRACE_EVENT(xfs_scrub_op_error,
__entry->ret_ip) __entry->ret_ip)
); );
TRACE_EVENT(xfs_scrub_file_op_error, TRACE_EVENT(xchk_file_op_error,
TP_PROTO(struct xfs_scrub_context *sc, int whichfork, TP_PROTO(struct xfs_scrub_context *sc, int whichfork,
xfs_fileoff_t offset, int error, void *ret_ip), xfs_fileoff_t offset, int error, void *ret_ip),
TP_ARGS(sc, whichfork, offset, error, ret_ip), TP_ARGS(sc, whichfork, offset, error, ret_ip),
@ -119,7 +119,7 @@ TRACE_EVENT(xfs_scrub_file_op_error,
__entry->ret_ip) __entry->ret_ip)
); );
DECLARE_EVENT_CLASS(xfs_scrub_block_error_class, DECLARE_EVENT_CLASS(xchk_block_error_class,
TP_PROTO(struct xfs_scrub_context *sc, xfs_daddr_t daddr, void *ret_ip), TP_PROTO(struct xfs_scrub_context *sc, xfs_daddr_t daddr, void *ret_ip),
TP_ARGS(sc, daddr, ret_ip), TP_ARGS(sc, daddr, ret_ip),
TP_STRUCT__entry( TP_STRUCT__entry(
@ -153,15 +153,15 @@ DECLARE_EVENT_CLASS(xfs_scrub_block_error_class,
) )
#define DEFINE_SCRUB_BLOCK_ERROR_EVENT(name) \ #define DEFINE_SCRUB_BLOCK_ERROR_EVENT(name) \
DEFINE_EVENT(xfs_scrub_block_error_class, name, \ DEFINE_EVENT(xchk_block_error_class, name, \
TP_PROTO(struct xfs_scrub_context *sc, xfs_daddr_t daddr, \ TP_PROTO(struct xfs_scrub_context *sc, xfs_daddr_t daddr, \
void *ret_ip), \ void *ret_ip), \
TP_ARGS(sc, daddr, ret_ip)) TP_ARGS(sc, daddr, ret_ip))
DEFINE_SCRUB_BLOCK_ERROR_EVENT(xfs_scrub_block_error); DEFINE_SCRUB_BLOCK_ERROR_EVENT(xchk_block_error);
DEFINE_SCRUB_BLOCK_ERROR_EVENT(xfs_scrub_block_preen); DEFINE_SCRUB_BLOCK_ERROR_EVENT(xchk_block_preen);
DECLARE_EVENT_CLASS(xfs_scrub_ino_error_class, DECLARE_EVENT_CLASS(xchk_ino_error_class,
TP_PROTO(struct xfs_scrub_context *sc, xfs_ino_t ino, void *ret_ip), TP_PROTO(struct xfs_scrub_context *sc, xfs_ino_t ino, void *ret_ip),
TP_ARGS(sc, ino, ret_ip), TP_ARGS(sc, ino, ret_ip),
TP_STRUCT__entry( TP_STRUCT__entry(
@ -184,16 +184,16 @@ DECLARE_EVENT_CLASS(xfs_scrub_ino_error_class,
) )
#define DEFINE_SCRUB_INO_ERROR_EVENT(name) \ #define DEFINE_SCRUB_INO_ERROR_EVENT(name) \
DEFINE_EVENT(xfs_scrub_ino_error_class, name, \ DEFINE_EVENT(xchk_ino_error_class, name, \
TP_PROTO(struct xfs_scrub_context *sc, xfs_ino_t ino, \ TP_PROTO(struct xfs_scrub_context *sc, xfs_ino_t ino, \
void *ret_ip), \ void *ret_ip), \
TP_ARGS(sc, ino, ret_ip)) TP_ARGS(sc, ino, ret_ip))
DEFINE_SCRUB_INO_ERROR_EVENT(xfs_scrub_ino_error); DEFINE_SCRUB_INO_ERROR_EVENT(xchk_ino_error);
DEFINE_SCRUB_INO_ERROR_EVENT(xfs_scrub_ino_preen); DEFINE_SCRUB_INO_ERROR_EVENT(xchk_ino_preen);
DEFINE_SCRUB_INO_ERROR_EVENT(xfs_scrub_ino_warning); DEFINE_SCRUB_INO_ERROR_EVENT(xchk_ino_warning);
DECLARE_EVENT_CLASS(xfs_scrub_fblock_error_class, DECLARE_EVENT_CLASS(xchk_fblock_error_class,
TP_PROTO(struct xfs_scrub_context *sc, int whichfork, TP_PROTO(struct xfs_scrub_context *sc, int whichfork,
xfs_fileoff_t offset, void *ret_ip), xfs_fileoff_t offset, void *ret_ip),
TP_ARGS(sc, whichfork, offset, ret_ip), TP_ARGS(sc, whichfork, offset, ret_ip),
@ -223,15 +223,15 @@ DECLARE_EVENT_CLASS(xfs_scrub_fblock_error_class,
); );
#define DEFINE_SCRUB_FBLOCK_ERROR_EVENT(name) \ #define DEFINE_SCRUB_FBLOCK_ERROR_EVENT(name) \
DEFINE_EVENT(xfs_scrub_fblock_error_class, name, \ DEFINE_EVENT(xchk_fblock_error_class, name, \
TP_PROTO(struct xfs_scrub_context *sc, int whichfork, \ TP_PROTO(struct xfs_scrub_context *sc, int whichfork, \
xfs_fileoff_t offset, void *ret_ip), \ xfs_fileoff_t offset, void *ret_ip), \
TP_ARGS(sc, whichfork, offset, ret_ip)) TP_ARGS(sc, whichfork, offset, ret_ip))
DEFINE_SCRUB_FBLOCK_ERROR_EVENT(xfs_scrub_fblock_error); DEFINE_SCRUB_FBLOCK_ERROR_EVENT(xchk_fblock_error);
DEFINE_SCRUB_FBLOCK_ERROR_EVENT(xfs_scrub_fblock_warning); DEFINE_SCRUB_FBLOCK_ERROR_EVENT(xchk_fblock_warning);
TRACE_EVENT(xfs_scrub_incomplete, TRACE_EVENT(xchk_incomplete,
TP_PROTO(struct xfs_scrub_context *sc, void *ret_ip), TP_PROTO(struct xfs_scrub_context *sc, void *ret_ip),
TP_ARGS(sc, ret_ip), TP_ARGS(sc, ret_ip),
TP_STRUCT__entry( TP_STRUCT__entry(
@ -250,7 +250,7 @@ TRACE_EVENT(xfs_scrub_incomplete,
__entry->ret_ip) __entry->ret_ip)
); );
TRACE_EVENT(xfs_scrub_btree_op_error, TRACE_EVENT(xchk_btree_op_error,
TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur, TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
int level, int error, void *ret_ip), int level, int error, void *ret_ip),
TP_ARGS(sc, cur, level, error, ret_ip), TP_ARGS(sc, cur, level, error, ret_ip),
@ -266,7 +266,7 @@ TRACE_EVENT(xfs_scrub_btree_op_error,
__field(void *, ret_ip) __field(void *, ret_ip)
), ),
TP_fast_assign( TP_fast_assign(
xfs_fsblock_t fsbno = xfs_scrub_btree_cur_fsbno(cur, level); xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
__entry->dev = sc->mp->m_super->s_dev; __entry->dev = sc->mp->m_super->s_dev;
__entry->type = sc->sm->sm_type; __entry->type = sc->sm->sm_type;
@ -290,7 +290,7 @@ TRACE_EVENT(xfs_scrub_btree_op_error,
__entry->ret_ip) __entry->ret_ip)
); );
TRACE_EVENT(xfs_scrub_ifork_btree_op_error, TRACE_EVENT(xchk_ifork_btree_op_error,
TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur, TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
int level, int error, void *ret_ip), int level, int error, void *ret_ip),
TP_ARGS(sc, cur, level, error, ret_ip), TP_ARGS(sc, cur, level, error, ret_ip),
@ -308,7 +308,7 @@ TRACE_EVENT(xfs_scrub_ifork_btree_op_error,
__field(void *, ret_ip) __field(void *, ret_ip)
), ),
TP_fast_assign( TP_fast_assign(
xfs_fsblock_t fsbno = xfs_scrub_btree_cur_fsbno(cur, level); xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
__entry->dev = sc->mp->m_super->s_dev; __entry->dev = sc->mp->m_super->s_dev;
__entry->ino = sc->ip->i_ino; __entry->ino = sc->ip->i_ino;
__entry->whichfork = cur->bc_private.b.whichfork; __entry->whichfork = cur->bc_private.b.whichfork;
@ -335,7 +335,7 @@ TRACE_EVENT(xfs_scrub_ifork_btree_op_error,
__entry->ret_ip) __entry->ret_ip)
); );
TRACE_EVENT(xfs_scrub_btree_error, TRACE_EVENT(xchk_btree_error,
TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur, TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
int level, void *ret_ip), int level, void *ret_ip),
TP_ARGS(sc, cur, level, ret_ip), TP_ARGS(sc, cur, level, ret_ip),
@ -350,7 +350,7 @@ TRACE_EVENT(xfs_scrub_btree_error,
__field(void *, ret_ip) __field(void *, ret_ip)
), ),
TP_fast_assign( TP_fast_assign(
xfs_fsblock_t fsbno = xfs_scrub_btree_cur_fsbno(cur, level); xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
__entry->dev = sc->mp->m_super->s_dev; __entry->dev = sc->mp->m_super->s_dev;
__entry->type = sc->sm->sm_type; __entry->type = sc->sm->sm_type;
__entry->btnum = cur->bc_btnum; __entry->btnum = cur->bc_btnum;
@ -371,7 +371,7 @@ TRACE_EVENT(xfs_scrub_btree_error,
__entry->ret_ip) __entry->ret_ip)
); );
TRACE_EVENT(xfs_scrub_ifork_btree_error, TRACE_EVENT(xchk_ifork_btree_error,
TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur, TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
int level, void *ret_ip), int level, void *ret_ip),
TP_ARGS(sc, cur, level, ret_ip), TP_ARGS(sc, cur, level, ret_ip),
@ -388,7 +388,7 @@ TRACE_EVENT(xfs_scrub_ifork_btree_error,
__field(void *, ret_ip) __field(void *, ret_ip)
), ),
TP_fast_assign( TP_fast_assign(
xfs_fsblock_t fsbno = xfs_scrub_btree_cur_fsbno(cur, level); xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
__entry->dev = sc->mp->m_super->s_dev; __entry->dev = sc->mp->m_super->s_dev;
__entry->ino = sc->ip->i_ino; __entry->ino = sc->ip->i_ino;
__entry->whichfork = cur->bc_private.b.whichfork; __entry->whichfork = cur->bc_private.b.whichfork;
@ -413,7 +413,7 @@ TRACE_EVENT(xfs_scrub_ifork_btree_error,
__entry->ret_ip) __entry->ret_ip)
); );
DECLARE_EVENT_CLASS(xfs_scrub_sbtree_class, DECLARE_EVENT_CLASS(xchk_sbtree_class,
TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur, TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
int level), int level),
TP_ARGS(sc, cur, level), TP_ARGS(sc, cur, level),
@ -428,7 +428,7 @@ DECLARE_EVENT_CLASS(xfs_scrub_sbtree_class,
__field(int, ptr) __field(int, ptr)
), ),
TP_fast_assign( TP_fast_assign(
xfs_fsblock_t fsbno = xfs_scrub_btree_cur_fsbno(cur, level); xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
__entry->dev = sc->mp->m_super->s_dev; __entry->dev = sc->mp->m_super->s_dev;
__entry->type = sc->sm->sm_type; __entry->type = sc->sm->sm_type;
@ -450,15 +450,15 @@ DECLARE_EVENT_CLASS(xfs_scrub_sbtree_class,
__entry->ptr) __entry->ptr)
) )
#define DEFINE_SCRUB_SBTREE_EVENT(name) \ #define DEFINE_SCRUB_SBTREE_EVENT(name) \
DEFINE_EVENT(xfs_scrub_sbtree_class, name, \ DEFINE_EVENT(xchk_sbtree_class, name, \
TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur, \ TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur, \
int level), \ int level), \
TP_ARGS(sc, cur, level)) TP_ARGS(sc, cur, level))
DEFINE_SCRUB_SBTREE_EVENT(xfs_scrub_btree_rec); DEFINE_SCRUB_SBTREE_EVENT(xchk_btree_rec);
DEFINE_SCRUB_SBTREE_EVENT(xfs_scrub_btree_key); DEFINE_SCRUB_SBTREE_EVENT(xchk_btree_key);
TRACE_EVENT(xfs_scrub_xref_error, TRACE_EVENT(xchk_xref_error,
TP_PROTO(struct xfs_scrub_context *sc, int error, void *ret_ip), TP_PROTO(struct xfs_scrub_context *sc, int error, void *ret_ip),
TP_ARGS(sc, error, ret_ip), TP_ARGS(sc, error, ret_ip),
TP_STRUCT__entry( TP_STRUCT__entry(