// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
#include "scrub/bitmap.h"

/*
 * Set a range of this bitmap.  Caller must ensure the range is not set.
 *
 * This is the logical equivalent of bitmap |= mask(start, len).
 */
int
xfs_bitmap_set(
	struct xfs_bitmap	*bitmap,
	uint64_t		start,
	uint64_t		len)
{
	struct xfs_bitmap_range	*bmr;

	bmr = kmem_alloc(sizeof(struct xfs_bitmap_range), KM_MAYFAIL);
	if (!bmr)
		return -ENOMEM;

	INIT_LIST_HEAD(&bmr->list);
	bmr->start = start;
	bmr->len = len;
	list_add_tail(&bmr->list, &bitmap->list);

	return 0;
}

/* Free everything related to this bitmap. */
void
xfs_bitmap_destroy(
	struct xfs_bitmap	*bitmap)
{
	struct xfs_bitmap_range	*bmr;
	struct xfs_bitmap_range	*n;

	for_each_xfs_bitmap_extent(bmr, n, bitmap) {
		list_del(&bmr->list);
		kmem_free(bmr);
	}
}

/* Set up a per-AG block bitmap. */
void
xfs_bitmap_init(
	struct xfs_bitmap	*bitmap)
{
	INIT_LIST_HEAD(&bitmap->list);
}
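
/*
 * Example usage (an illustrative sketch, not called anywhere in this file;
 * the block numbers are made up): track two disjoint ranges, then tear the
 * whole thing down.
 *
 *	struct xfs_bitmap	bitmap;
 *	int			error;
 *
 *	xfs_bitmap_init(&bitmap);
 *	error = xfs_bitmap_set(&bitmap, 100, 10);	(blocks 100-109)
 *	if (!error)
 *		error = xfs_bitmap_set(&bitmap, 200, 5);	(blocks 200-204)
 *	xfs_bitmap_destroy(&bitmap);
 *
 * Note that xfs_bitmap_set does not check for or merge overlapping ranges,
 * so the caller must ensure the range being set is not already set.
 */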

/* Compare two bitmap ranges. */
static int
xfs_bitmap_range_cmp(
	void			*priv,
	struct list_head	*a,
	struct list_head	*b)
{
	struct xfs_bitmap_range	*ap;
	struct xfs_bitmap_range	*bp;

	ap = container_of(a, struct xfs_bitmap_range, list);
	bp = container_of(b, struct xfs_bitmap_range, list);

	if (ap->start > bp->start)
		return 1;
	if (ap->start < bp->start)
		return -1;
	return 0;
}

/*
 * Remove all the blocks mentioned in @sub from the extents in @bitmap.
 *
 * The intent is that callers will iterate the rmapbt for all of its records
 * for a given owner to generate @bitmap; and iterate all the blocks of the
 * metadata structures that are not being rebuilt and have the same rmapbt
 * owner to generate @sub.  This routine subtracts all the extents
 * mentioned in @sub from all the extents linked in @bitmap, which leaves
 * @bitmap as the list of blocks that are not accounted for, which we assume
 * are the dead blocks of the old metadata structure.  The blocks mentioned in
 * @bitmap can be reaped.
 *
 * This is the logical equivalent of bitmap &= ~sub.
 */
#define LEFT_ALIGNED	(1 << 0)
#define RIGHT_ALIGNED	(1 << 1)
int
xfs_bitmap_disunion(
	struct xfs_bitmap	*bitmap,
	struct xfs_bitmap	*sub)
{
	struct list_head	*lp;
	struct xfs_bitmap_range	*br;
	struct xfs_bitmap_range	*new_br;
	struct xfs_bitmap_range	*sub_br;
	uint64_t		sub_start;
	uint64_t		sub_len;
	int			state;
	int			error = 0;

	if (list_empty(&bitmap->list) || list_empty(&sub->list))
		return 0;
	ASSERT(!list_empty(&sub->list));

	list_sort(NULL, &bitmap->list, xfs_bitmap_range_cmp);
	list_sort(NULL, &sub->list, xfs_bitmap_range_cmp);

	/*
	 * Now that we've sorted both lists, we iterate bitmap once, rolling
	 * forward through sub and/or bitmap as necessary until we find an
	 * overlap or reach the end of either list.  We do not reset lp to the
	 * head of bitmap nor do we reset sub_br to the head of sub.  The
	 * list traversal is similar to merge sort, but we're deleting
	 * instead.  In this manner we avoid O(n^2) operations.
	 */
	sub_br = list_first_entry(&sub->list, struct xfs_bitmap_range,
			list);
	lp = bitmap->list.next;
	while (lp != &bitmap->list) {
		br = list_entry(lp, struct xfs_bitmap_range, list);

		/*
		 * Advance sub_br and/or br until we find a pair that
		 * intersects or we run out of extents.
		 */
		while (sub_br->start + sub_br->len <= br->start) {
			if (list_is_last(&sub_br->list, &sub->list))
				goto out;
			sub_br = list_next_entry(sub_br, list);
		}
		if (sub_br->start >= br->start + br->len) {
			lp = lp->next;
			continue;
		}

		/* trim sub_br to fit the extent we have */
		sub_start = sub_br->start;
		sub_len = sub_br->len;
		if (sub_br->start < br->start) {
			sub_len -= br->start - sub_br->start;
			sub_start = br->start;
		}
		if (sub_len > br->len)
			sub_len = br->len;

		state = 0;
		if (sub_start == br->start)
			state |= LEFT_ALIGNED;
		if (sub_start + sub_len == br->start + br->len)
			state |= RIGHT_ALIGNED;
		switch (state) {
		case LEFT_ALIGNED:
			/* Coincides with only the left. */
			br->start += sub_len;
			br->len -= sub_len;
			break;
		case RIGHT_ALIGNED:
			/* Coincides with only the right. */
			br->len -= sub_len;
			lp = lp->next;
			break;
		case LEFT_ALIGNED | RIGHT_ALIGNED:
			/* Total overlap, just delete br. */
			lp = lp->next;
			list_del(&br->list);
			kmem_free(br);
			break;
		case 0:
			/*
			 * Deleting from the middle: add the new right extent
			 * and then shrink the left extent.
			 */
			new_br = kmem_alloc(sizeof(struct xfs_bitmap_range),
					KM_MAYFAIL);
			if (!new_br) {
				error = -ENOMEM;
				goto out;
			}
			INIT_LIST_HEAD(&new_br->list);
			new_br->start = sub_start + sub_len;
			new_br->len = br->start + br->len - new_br->start;
			list_add(&new_br->list, &br->list);
			br->len = sub_start - br->start;
			lp = lp->next;
			break;
		default:
			ASSERT(0);
			break;
		}
	}

out:
	return error;
}
#undef LEFT_ALIGNED
#undef RIGHT_ALIGNED
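
/*
 * Worked example of the four disunion cases (illustrative only; the numbers
 * are made up).  Suppose @bitmap contains the single extent [10, 30) and we
 * subtract various @sub extents:
 *
 *   sub = [10, 15): LEFT_ALIGNED, bitmap becomes [15, 30)
 *   sub = [25, 30): RIGHT_ALIGNED, bitmap becomes [10, 25)
 *   sub = [10, 30): LEFT_ALIGNED | RIGHT_ALIGNED, the extent is deleted
 *   sub = [15, 20): case 0, the extent is split into [10, 15) and [20, 30)
 *
 * The middle-deletion case is the only one that allocates memory, which is
 * why it is the only case that can fail with -ENOMEM.
 */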

/*
 * Record all btree blocks seen while iterating all records of a btree.
 *
 * We know that the btree query_all function starts at the left edge and walks
 * towards the right edge of the tree.  Therefore, we know that we can walk up
 * the btree cursor towards the root; if the pointer for a given level points
 * to the first record/key in that block, we haven't seen this block before;
 * and therefore we need to remember that we saw this block in the btree.
 *
 * So if our btree is:
 *
 *    4
 *  / | \
 * 1  2  3
 *
 * Pretend for this example that each leaf block has 100 btree records.  For
 * the first btree record, we'll observe that bc_ptrs[0] == 1, so we record
 * that we saw block 1.  Then we observe that bc_ptrs[1] == 1, so we record
 * block 4.  The list is [1, 4].
 *
 * For the second btree record, we see that bc_ptrs[0] == 2, so we exit the
 * loop.  The list remains [1, 4].
 *
 * For the 101st btree record, we've moved onto leaf block 2.  Now
 * bc_ptrs[0] == 1 again, so we record that we saw block 2.  We see that
 * bc_ptrs[1] == 2, so we exit the loop.  The list is now [1, 4, 2].
 *
 * For the 102nd record, bc_ptrs[0] == 2, so we continue.
 *
 * For the 201st record, we've moved on to leaf block 3.  bc_ptrs[0] == 1, so
 * we add 3 to the list.  Now it is [1, 4, 2, 3].
 *
 * For the 300th record we just exit, with the list being [1, 4, 2, 3].
 */

/*
 * Record all the buffers pointed to by the btree cursor.  Callers already
 * engaged in a btree walk should call this function to capture the list of
 * blocks going from the leaf towards the root.
 */
int
xfs_bitmap_set_btcur_path(
	struct xfs_bitmap	*bitmap,
	struct xfs_btree_cur	*cur)
{
	struct xfs_buf		*bp;
	xfs_fsblock_t		fsb;
	int			i;
	int			error;

	for (i = 0; i < cur->bc_nlevels && cur->bc_ptrs[i] == 1; i++) {
		xfs_btree_get_block(cur, i, &bp);
		if (!bp)
			continue;
		fsb = XFS_DADDR_TO_FSB(cur->bc_mp, bp->b_bn);
		error = xfs_bitmap_set(bitmap, fsb, 1);
		if (error)
			return error;
	}

	return 0;
}
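
/*
 * Example of how a record walk might use the function above (a hypothetical
 * sketch; this callback and its caller are not part of this file).  Because
 * a query_all walk visits records in order, recording the cursor path
 * whenever each level's pointer is 1 captures every block exactly once:
 *
 *	STATIC int
 *	xchk_example_visit_rec(
 *		struct xfs_btree_cur	*cur,
 *		union xfs_btree_rec	*rec,
 *		void			*priv)
 *	{
 *		struct xfs_bitmap	*bitmap = priv;
 *
 *		return xfs_bitmap_set_btcur_path(bitmap, cur);
 *	}
 *
 * xfs_bitmap_set_btcur_path stops climbing at the first level whose pointer
 * is not 1, which is what keeps a block from being recorded more than once.
 */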

/* Collect a btree's block in the bitmap. */
STATIC int
xfs_bitmap_collect_btblock(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*priv)
{
	struct xfs_bitmap	*bitmap = priv;
	struct xfs_buf		*bp;
	xfs_fsblock_t		fsbno;

	xfs_btree_get_block(cur, level, &bp);
	if (!bp)
		return 0;

	fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, bp->b_bn);
	return xfs_bitmap_set(bitmap, fsbno, 1);
}

/* Walk the btree and mark the bitmap wherever a btree block is found. */
int
xfs_bitmap_set_btblocks(
	struct xfs_bitmap	*bitmap,
	struct xfs_btree_cur	*cur)
{
	return xfs_btree_visit_blocks(cur, xfs_bitmap_collect_btblock, bitmap);
}
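
/*
 * Putting it all together (an illustrative sketch, not a function in this
 * file): a repair function that wants to find the dead blocks of an old
 * metadata structure might build one bitmap from the rmapbt records for the
 * owner, build a second bitmap from the btrees it intends to keep, and then
 * subtract:
 *
 *	struct xfs_bitmap	old_blocks;
 *	struct xfs_bitmap	keep_blocks;
 *	int			error;
 *
 *	xfs_bitmap_init(&old_blocks);
 *	xfs_bitmap_init(&keep_blocks);
 *	(fill old_blocks from the rmapbt, e.g. via xfs_bitmap_set)
 *	error = xfs_bitmap_set_btblocks(&keep_blocks, cur);
 *	if (!error)
 *		error = xfs_bitmap_disunion(&old_blocks, &keep_blocks);
 *	(reap whatever is left in old_blocks)
 *	xfs_bitmap_destroy(&keep_blocks);
 *	xfs_bitmap_destroy(&old_blocks);
 *
 * This mirrors the intent described in the comment above
 * xfs_bitmap_disunion.
 */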