bcache: Prune struct btree_op
Eventual goal is for struct btree_op to contain only what is necessary for traversing the btree.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
This commit is contained in:
parent cc23196631
commit c18536a72d
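At a glance, the change moves per-request state out of struct btree_op and into struct search, and threads the journal reference through as an explicit parameter instead of op->journal. A rough sketch of the resulting shapes, condensed from the hunks below (illustrative only, not the full definitions):

	/* btree_op keeps only what btree traversal itself needs */
	struct btree_op {
		struct closure cl;
		short lock;	/* btree level at which we start taking write locks */
		/* type:8, insert_collision:1, BKEY_PADDED(replace) remain */
	};

	/*
	 * The fields it loses (c, journal, cache_bio, inode, write_prio,
	 * csum, bypass, flush_journal, insert_data_done) move into
	 * struct search or become explicit arguments, e.g.:
	 */
	int bch_btree_insert(struct btree_op *op, struct cache_set *c,
			     struct keylist *keys, atomic_t *journal_ref);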
@@ -1197,7 +1197,6 @@ int bch_bset_print_stats(struct cache_set *c, char *buf)
 
 	memset(&t, 0, sizeof(struct bset_stats));
 	bch_btree_op_init_stack(&t.op);
-	t.op.c = c;
 
 	ret = bch_btree_map_nodes(&t.op, c, &ZERO_KEY, btree_bset_stats);
 	if (ret < 0)
@@ -503,7 +503,7 @@ static void btree_node_write_work(struct work_struct *w)
 	rw_unlock(true, b);
 }
 
-static void bch_btree_leaf_dirty(struct btree *b, struct btree_op *op)
+static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
 {
 	struct bset *i = b->sets[b->nsets].data;
 	struct btree_write *w = btree_current_write(b);
@@ -516,15 +516,15 @@ static void bch_btree_leaf_dirty(struct btree *b, struct btree_op *op)
 
 	set_btree_node_dirty(b);
 
-	if (op->journal) {
+	if (journal_ref) {
 		if (w->journal &&
-		    journal_pin_cmp(b->c, w, op)) {
+		    journal_pin_cmp(b->c, w->journal, journal_ref)) {
 			atomic_dec_bug(w->journal);
 			w->journal = NULL;
 		}
 
 		if (!w->journal) {
-			w->journal = op->journal;
+			w->journal = journal_ref;
 			atomic_inc(w->journal);
 		}
 	}
@@ -1663,13 +1663,16 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
 	return 0;
 }
 
-int bch_btree_check(struct cache_set *c, struct btree_op *op)
+int bch_btree_check(struct cache_set *c)
 {
 	int ret = -ENOMEM;
 	unsigned i;
 	unsigned long *seen[MAX_CACHES_PER_SET];
+	struct btree_op op;
 
 	memset(seen, 0, sizeof(seen));
+	bch_btree_op_init_stack(&op);
+	op.lock = SHRT_MAX;
 
 	for (i = 0; c->cache[i]; i++) {
 		size_t n = DIV_ROUND_UP(c->cache[i]->sb.nbuckets, 8);
@@ -1681,7 +1684,7 @@ int bch_btree_check(struct cache_set *c, struct btree_op *op)
 		memset(seen[i], 0xFF, n);
 	}
 
-	ret = btree_root(check_recurse, c, op, seen);
+	ret = btree_root(check_recurse, c, &op, seen);
err:
	for (i = 0; i < MAX_CACHES_PER_SET; i++)
 		kfree(seen[i]);
@@ -2091,7 +2094,8 @@ static int btree_split(struct btree *b, struct btree_op *op,
 }
 
 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
-				 struct keylist *insert_keys)
+				 struct keylist *insert_keys,
+				 atomic_t *journal_ref)
 {
 	int ret = 0;
 	struct keylist split_keys;
@@ -2123,7 +2127,7 @@ static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
 
 	if (bch_btree_insert_keys(b, op, insert_keys)) {
 		if (!b->level)
-			bch_btree_leaf_dirty(b, op);
+			bch_btree_leaf_dirty(b, journal_ref);
 		else
 			bch_btree_node_write(b, &op->cl);
 	}
@@ -2162,7 +2166,7 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
 
 	BUG_ON(op->type != BTREE_INSERT);
 
-	ret = bch_btree_insert_node(b, op, &insert);
+	ret = bch_btree_insert_node(b, op, &insert, NULL);
 
 	BUG_ON(!ret && !bch_keylist_empty(&insert));
out:
@@ -2172,7 +2176,7 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
 }
 
 static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op,
-				    struct keylist *keys)
+				    struct keylist *keys, atomic_t *journal_ref)
 {
 	if (bch_keylist_empty(keys))
 		return 0;
@@ -2189,14 +2193,14 @@ static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op,
 			return -EIO;
 		}
 
-		return btree(insert_recurse, k, b, op, keys);
+		return btree(insert_recurse, k, b, op, keys, journal_ref);
 	} else {
-		return bch_btree_insert_node(b, op, keys);
+		return bch_btree_insert_node(b, op, keys, journal_ref);
 	}
 }
 
 int bch_btree_insert(struct btree_op *op, struct cache_set *c,
-		     struct keylist *keys)
+		     struct keylist *keys, atomic_t *journal_ref)
 {
 	int ret = 0;
 
@@ -2210,7 +2214,7 @@ int bch_btree_insert(struct btree_op *op, struct cache_set *c,
 
 	while (!bch_keylist_empty(keys)) {
 		op->lock = 0;
-		ret = btree_root(insert_recurse, c, op, keys);
+		ret = btree_root(insert_recurse, c, op, keys, journal_ref);
 
 		if (ret == -EAGAIN) {
 			ret = 0;
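With the op argument dropped from bch_btree_check() in the hunks above (and from bch_journal_replay() further down), each converted entry point now sets up its own op on the stack; condensed from those hunks (illustrative):

	struct btree_op op;

	bch_btree_op_init_stack(&op);
	op.lock = SHRT_MAX;

	ret = btree_root(check_recurse, c, &op, seen);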
@@ -238,17 +238,6 @@ void __bkey_put(struct cache_set *c, struct bkey *k);
 
 struct btree_op {
 	struct closure cl;
-	struct cache_set *c;
-
-	/* Journal entry we have a refcount on */
-	atomic_t *journal;
-
-	/* Bio to be inserted into the cache */
-	struct bio *cache_bio;
-
-	unsigned inode;
-
-	uint16_t write_prio;
 
 	/* Btree level at which we start taking write locks */
 	short lock;
@@ -259,11 +248,6 @@ struct btree_op {
 		BTREE_REPLACE
 	} type:8;
 
-	unsigned csum:1;
-	unsigned bypass:1;
-	unsigned flush_journal:1;
-
-	unsigned insert_data_done:1;
 	unsigned insert_collision:1;
 
 	BKEY_PADDED(replace);
@@ -303,12 +287,13 @@ struct btree *bch_btree_node_get(struct cache_set *, struct bkey *, int, bool);
 
 int bch_btree_insert_check_key(struct btree *, struct btree_op *,
			       struct bkey *);
-int bch_btree_insert(struct btree_op *, struct cache_set *, struct keylist *);
+int bch_btree_insert(struct btree_op *, struct cache_set *,
+		     struct keylist *, atomic_t *);
 
 int bch_gc_thread_start(struct cache_set *);
 size_t bch_btree_gc_finish(struct cache_set *);
 void bch_moving_gc(struct cache_set *);
-int bch_btree_check(struct cache_set *, struct btree_op *);
+int bch_btree_check(struct cache_set *);
 uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *);
 
 static inline void wake_up_gc(struct cache_set *c)
@@ -30,17 +30,20 @@ static void journal_read_endio(struct bio *bio, int error)
 }
 
 static int journal_read_bucket(struct cache *ca, struct list_head *list,
-			       struct btree_op *op, unsigned bucket_index)
+			       unsigned bucket_index)
 {
 	struct journal_device *ja = &ca->journal;
 	struct bio *bio = &ja->bio;
 
 	struct journal_replay *i;
 	struct jset *j, *data = ca->set->journal.w[0].data;
+	struct closure cl;
 	unsigned len, left, offset = 0;
 	int ret = 0;
 	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);
 
+	closure_init_stack(&cl);
+
 	pr_debug("reading %llu", (uint64_t) bucket);
 
 	while (offset < ca->sb.bucket_size) {
@@ -54,11 +57,11 @@ reread: left = ca->sb.bucket_size - offset;
 		bio->bi_size = len << 9;
 
 		bio->bi_end_io = journal_read_endio;
-		bio->bi_private = &op->cl;
+		bio->bi_private = &cl;
 		bch_bio_map(bio, data);
 
-		closure_bio_submit(bio, &op->cl, ca);
-		closure_sync(&op->cl);
+		closure_bio_submit(bio, &cl, ca);
+		closure_sync(&cl);
 
 		/* This function could be simpler now since we no longer write
 		 * journal entries that overlap bucket boundaries; this means
@@ -128,12 +131,11 @@ reread: left = ca->sb.bucket_size - offset;
 	return ret;
 }
 
-int bch_journal_read(struct cache_set *c, struct list_head *list,
-		     struct btree_op *op)
+int bch_journal_read(struct cache_set *c, struct list_head *list)
 {
 #define read_bucket(b)						\
 	({							\
-		int ret = journal_read_bucket(ca, list, op, b);	\
+		int ret = journal_read_bucket(ca, list, b);	\
 		__set_bit(b, bitmap);				\
 		if (ret < 0)					\
 			return ret;				\
@@ -291,8 +293,7 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
 	}
 }
 
-int bch_journal_replay(struct cache_set *s, struct list_head *list,
-		       struct btree_op *op)
+int bch_journal_replay(struct cache_set *s, struct list_head *list)
 {
 	int ret = 0, keys = 0, entries = 0;
 	struct bkey *k;
@@ -301,8 +302,11 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list,
 
 	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
 	struct keylist keylist;
+	struct btree_op op;
 
 	bch_keylist_init(&keylist);
+	bch_btree_op_init_stack(&op);
+	op.lock = SHRT_MAX;
 
 	list_for_each_entry(i, list, list) {
 		BUG_ON(i->pin && atomic_read(i->pin) != 1);
@@ -319,9 +323,7 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list,
 			bkey_copy(keylist.top, k);
 			bch_keylist_push(&keylist);
 
-			op->journal = i->pin;
-
-			ret = bch_btree_insert(op, s, &keylist);
+			ret = bch_btree_insert(&op, s, &keylist, i->pin);
 			if (ret)
 				goto err;
 
@@ -346,7 +348,7 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list,
 		kfree(i);
 	}
err:
-	closure_sync(&op->cl);
+	closure_sync(&op.cl);
 	return ret;
 }
 
@@ -368,8 +370,8 @@ static void btree_flush_write(struct cache_set *c)
 		if (!best)
 			best = b;
 		else if (journal_pin_cmp(c,
-					 btree_current_write(best),
-					 btree_current_write(b))) {
+					 btree_current_write(best)->journal,
+					 btree_current_write(b)->journal)) {
 			best = b;
 		}
 	}
@@ -189,8 +189,7 @@ struct journal_device {
 };
 
 #define journal_pin_cmp(c, l, r)				\
-	(fifo_idx(&(c)->journal.pin, (l)->journal) >		\
-	 fifo_idx(&(c)->journal.pin, (r)->journal))
+	(fifo_idx(&(c)->journal.pin, (l)) > fifo_idx(&(c)->journal.pin, (r)))
 
 #define JOURNAL_PIN 20000
 
@@ -206,10 +205,8 @@ atomic_t *bch_journal(struct cache_set *, struct keylist *, struct closure *);
 void bch_journal_next(struct journal *);
 void bch_journal_mark(struct cache_set *, struct list_head *);
 void bch_journal_meta(struct cache_set *, struct closure *);
-int bch_journal_read(struct cache_set *, struct list_head *,
-		     struct btree_op *);
-int bch_journal_replay(struct cache_set *, struct list_head *,
-		       struct btree_op *);
+int bch_journal_read(struct cache_set *, struct list_head *);
+int bch_journal_replay(struct cache_set *, struct list_head *);
 
 void bch_journal_free(struct cache_set *);
 int bch_journal_alloc(struct cache_set *);
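Since journal_pin_cmp() now compares the journal entry pointers it is given rather than reaching through a btree_write and a btree_op, the two call sites converted above dereference the write themselves; simplified excerpts from those hunks:

	/* in bch_btree_leaf_dirty() */
	if (w->journal &&
	    journal_pin_cmp(b->c, w->journal, journal_ref)) { ... }

	/* in btree_flush_write() */
	else if (journal_pin_cmp(c,
				 btree_current_write(best)->journal,
				 btree_current_write(b)->journal)) { ... }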
@@ -55,9 +55,9 @@ static void write_moving_finish(struct closure *cl)
 	if (io->s.op.insert_collision)
 		trace_bcache_gc_copy_collision(&io->w->key);
 
-	bch_keybuf_del(&io->s.op.c->moving_gc_keys, io->w);
+	bch_keybuf_del(&io->s.c->moving_gc_keys, io->w);
 
-	up(&io->s.op.c->moving_in_flight);
+	up(&io->s.c->moving_in_flight);
 
 	closure_return_with_destructor(cl, moving_io_destructor);
 }
@@ -70,7 +70,7 @@ static void read_moving_endio(struct bio *bio, int error)
 	if (error)
 		io->s.error = error;
 
-	bch_bbio_endio(io->s.op.c, bio, error, "reading data to move");
+	bch_bbio_endio(io->s.c, bio, error, "reading data to move");
 }
 
 static void moving_init(struct moving_io *io)
@@ -99,11 +99,11 @@ static void write_moving(struct closure *cl)
 
 		io->bio.bio.bi_sector = KEY_START(&io->w->key);
 		s->op.lock = -1;
-		s->op.write_prio = 1;
-		s->op.cache_bio = &io->bio.bio;
+		s->write_prio = 1;
+		s->cache_bio = &io->bio.bio;
 
 		s->writeback = KEY_DIRTY(&io->w->key);
-		s->op.csum = KEY_CSUM(&io->w->key);
+		s->csum = KEY_CSUM(&io->w->key);
 
 		s->op.type = BTREE_REPLACE;
 		bkey_copy(&s->op.replace, &io->w->key);
@@ -121,7 +121,7 @@ static void read_moving_submit(struct closure *cl)
 	struct moving_io *io = container_of(s, struct moving_io, s);
 	struct bio *bio = &io->bio.bio;
 
-	bch_submit_bbio(bio, s->op.c, &io->w->key, 0);
+	bch_submit_bbio(bio, s->c, &io->w->key, 0);
 
 	continue_at(cl, write_moving, system_wq);
 }
@@ -151,8 +151,8 @@ static void read_moving(struct cache_set *c)
 
 		w->private = io;
 		io->w = w;
-		io->s.op.inode = KEY_INODE(&w->key);
-		io->s.op.c = c;
+		io->s.inode = KEY_INODE(&w->key);
+		io->s.c = c;
 
 		moving_init(io);
 		bio = &io->bio.bio;
@@ -217,6 +217,7 @@ static void bch_data_insert_keys(struct closure *cl)
 {
 	struct btree_op *op = container_of(cl, struct btree_op, cl);
 	struct search *s = container_of(op, struct search, op);
+	atomic_t *journal_ref = NULL;
 
 	/*
 	 * If we're looping, might already be waiting on
@@ -231,20 +232,19 @@ static void bch_data_insert_keys(struct closure *cl)
#endif
 
 	if (s->write)
-		op->journal = bch_journal(op->c, &s->insert_keys,
-					  op->flush_journal
+		journal_ref = bch_journal(s->c, &s->insert_keys,
+					  s->flush_journal
 					  ? &s->cl : NULL);
 
-	if (bch_btree_insert(op, op->c, &s->insert_keys)) {
+	if (bch_btree_insert(op, s->c, &s->insert_keys, journal_ref)) {
 		s->error = -ENOMEM;
-		op->insert_data_done = true;
+		s->insert_data_done = true;
 	}
 
-	if (op->journal)
-		atomic_dec_bug(op->journal);
-	op->journal = NULL;
+	if (journal_ref)
+		atomic_dec_bug(journal_ref);
 
-	if (!op->insert_data_done)
+	if (!s->insert_data_done)
 		continue_at(cl, bch_data_insert_start, bcache_wq);
 
 	bch_keylist_free(&s->insert_keys);
@@ -347,7 +347,7 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c,
 static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
			      struct search *s)
 {
-	struct cache_set *c = s->op.c;
+	struct cache_set *c = s->c;
 	struct open_bucket *b;
 	BKEY_PADDED(key) alloc;
 	unsigned i;
@@ -363,7 +363,7 @@ static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
 	spin_lock(&c->data_bucket_lock);
 
 	while (!(b = pick_data_bucket(c, k, s->task, &alloc.key))) {
-		unsigned watermark = s->op.write_prio
+		unsigned watermark = s->write_prio
 			? WATERMARK_MOVINGGC
 			: WATERMARK_NONE;
 
@@ -435,7 +435,7 @@ static void bch_data_invalidate(struct closure *cl)
 {
 	struct btree_op *op = container_of(cl, struct btree_op, cl);
 	struct search *s = container_of(op, struct search, op);
-	struct bio *bio = op->cache_bio;
+	struct bio *bio = s->cache_bio;
 
 	pr_debug("invalidating %i sectors from %llu",
 		 bio_sectors(bio), (uint64_t) bio->bi_sector);
@@ -443,17 +443,17 @@ static void bch_data_invalidate(struct closure *cl)
 	while (bio_sectors(bio)) {
 		unsigned len = min(bio_sectors(bio), 1U << 14);
 
-		if (bch_keylist_realloc(&s->insert_keys, 0, op->c))
+		if (bch_keylist_realloc(&s->insert_keys, 0, s->c))
 			goto out;
 
 		bio->bi_sector += len;
 		bio->bi_size -= len << 9;
 
 		bch_keylist_add(&s->insert_keys,
-				&KEY(op->inode, bio->bi_sector, len));
+				&KEY(s->inode, bio->bi_sector, len));
 	}
 
-	op->insert_data_done = true;
+	s->insert_data_done = true;
 	bio_put(bio);
out:
 	continue_at(cl, bch_data_insert_keys, bcache_wq);
@@ -506,21 +506,21 @@ static void bch_data_insert_endio(struct bio *bio, int error)
 		set_closure_fn(cl, NULL, NULL);
 	}
 
-	bch_bbio_endio(op->c, bio, error, "writing data to cache");
+	bch_bbio_endio(s->c, bio, error, "writing data to cache");
 }
 
 static void bch_data_insert_start(struct closure *cl)
 {
 	struct btree_op *op = container_of(cl, struct btree_op, cl);
 	struct search *s = container_of(op, struct search, op);
-	struct bio *bio = op->cache_bio, *n;
+	struct bio *bio = s->cache_bio, *n;
 
-	if (op->bypass)
+	if (s->bypass)
 		return bch_data_invalidate(cl);
 
-	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
-		set_gc_sectors(op->c);
-		wake_up_gc(op->c);
+	if (atomic_sub_return(bio_sectors(bio), &s->c->sectors_to_gc) < 0) {
+		set_gc_sectors(s->c);
+		wake_up_gc(s->c);
 	}
 
 	/*
@@ -533,17 +533,17 @@ static void bch_data_insert_start(struct closure *cl)
 		unsigned i;
 		struct bkey *k;
 		struct bio_set *split = s->d
-			? s->d->bio_split : op->c->bio_split;
+			? s->d->bio_split : s->c->bio_split;
 
 		/* 1 for the device pointer and 1 for the chksum */
 		if (bch_keylist_realloc(&s->insert_keys,
-					1 + (op->csum ? 1 : 0),
-					op->c))
+					1 + (s->csum ? 1 : 0),
+					s->c))
 			continue_at(cl, bch_data_insert_keys, bcache_wq);
 
 		k = s->insert_keys.top;
 		bkey_init(k);
-		SET_KEY_INODE(k, op->inode);
+		SET_KEY_INODE(k, s->inode);
 		SET_KEY_OFFSET(k, bio->bi_sector);
 
 		if (!bch_alloc_sectors(k, bio_sectors(bio), s))
@@ -558,11 +558,11 @@ static void bch_data_insert_start(struct closure *cl)
 			SET_KEY_DIRTY(k, true);
 
 			for (i = 0; i < KEY_PTRS(k); i++)
-				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
+				SET_GC_MARK(PTR_BUCKET(s->c, k, i),
					    GC_MARK_DIRTY);
 		}
 
-		SET_KEY_CSUM(k, op->csum);
+		SET_KEY_CSUM(k, s->csum);
 		if (KEY_CSUM(k))
 			bio_csum(n, k);
 
@@ -570,10 +570,10 @@ static void bch_data_insert_start(struct closure *cl)
 		bch_keylist_push(&s->insert_keys);
 
 		n->bi_rw |= REQ_WRITE;
-		bch_submit_bbio(n, op->c, k, 0);
+		bch_submit_bbio(n, s->c, k, 0);
 	} while (n != bio);
 
-	op->insert_data_done = true;
+	s->insert_data_done = true;
 	continue_at(cl, bch_data_insert_keys, bcache_wq);
err:
 	/* bch_alloc_sectors() blocks if s->writeback = true */
@@ -592,14 +592,14 @@ static void bch_data_insert_start(struct closure *cl)
 		 * we wait for buckets to be freed up, so just invalidate the
 		 * rest of the write.
 		 */
-		op->bypass = true;
+		s->bypass = true;
 		return bch_data_invalidate(cl);
 	} else {
 		/*
 		 * From a cache miss, we can just insert the keys for the data
 		 * we have written or bail out if we didn't do anything.
 		 */
-		op->insert_data_done = true;
+		s->insert_data_done = true;
 		bio_put(bio);
 
 		if (!bch_keylist_empty(&s->insert_keys))
@@ -622,11 +622,11 @@ static void bch_data_insert_start(struct closure *cl)
 * data is written it calls bch_journal, and after the keys have been added to
 * the next journal write they're inserted into the btree.
 *
- * It inserts the data in op->cache_bio; bi_sector is used for the key offset,
+ * It inserts the data in s->cache_bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
- * If op->bypass is true, instead of inserting the data it invalidates the
- * region of the cache represented by op->cache_bio and op->inode.
+ * If s->bypass is true, instead of inserting the data it invalidates the
+ * region of the cache represented by s->cache_bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
@@ -634,7 +634,7 @@ void bch_data_insert(struct closure *cl)
 	struct search *s = container_of(op, struct search, op);
 
 	bch_keylist_init(&s->insert_keys);
-	bio_get(op->cache_bio);
+	bio_get(s->cache_bio);
 	bch_data_insert_start(cl);
 }
 
@@ -655,12 +655,12 @@ static void bch_cache_read_endio(struct bio *bio, int error)
 
 	if (error)
 		s->error = error;
-	else if (ptr_stale(s->op.c, &b->key, 0)) {
-		atomic_long_inc(&s->op.c->cache_read_races);
+	else if (ptr_stale(s->c, &b->key, 0)) {
+		atomic_long_inc(&s->c->cache_read_races);
 		s->error = -EINTR;
 	}
 
-	bch_bbio_endio(s->op.c, bio, error, "reading from cache");
+	bch_bbio_endio(s->c, bio, error, "reading from cache");
 }
 
 /*
@@ -674,13 +674,13 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
 	struct bkey *bio_key;
 	unsigned ptr;
 
-	if (bkey_cmp(k, &KEY(op->inode, bio->bi_sector, 0)) <= 0)
+	if (bkey_cmp(k, &KEY(s->inode, bio->bi_sector, 0)) <= 0)
 		return MAP_CONTINUE;
 
-	if (KEY_INODE(k) != s->op.inode ||
+	if (KEY_INODE(k) != s->inode ||
	    KEY_START(k) > bio->bi_sector) {
 		unsigned bio_sectors = bio_sectors(bio);
-		unsigned sectors = KEY_INODE(k) == s->op.inode
+		unsigned sectors = KEY_INODE(k) == s->inode
 			? min_t(uint64_t, INT_MAX,
				KEY_START(k) - bio->bi_sector)
 			: INT_MAX;
@@ -708,8 +708,8 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
 	bio_key = &container_of(n, struct bbio, bio)->key;
 	bch_bkey_copy_single_ptr(bio_key, k, ptr);
 
-	bch_cut_front(&KEY(s->op.inode, n->bi_sector, 0), bio_key);
-	bch_cut_back(&KEY(s->op.inode, bio_end_sector(n), 0), bio_key);
+	bch_cut_front(&KEY(s->inode, n->bi_sector, 0), bio_key);
+	bch_cut_back(&KEY(s->inode, bio_end_sector(n), 0), bio_key);
 
 	n->bi_end_io = bch_cache_read_endio;
 	n->bi_private = &s->cl;
@@ -735,8 +735,8 @@ static void cache_lookup(struct closure *cl)
 	struct search *s = container_of(op, struct search, op);
 	struct bio *bio = &s->bio.bio;
 
-	int ret = bch_btree_map_keys(op, op->c,
-				     &KEY(op->inode, bio->bi_sector, 0),
+	int ret = bch_btree_map_keys(op, s->c,
+				     &KEY(s->inode, bio->bi_sector, 0),
				     cache_lookup_fn, MAP_END_KEY);
 	if (ret == -EAGAIN)
 		continue_at(cl, cache_lookup, bcache_wq);
@@ -793,8 +793,8 @@ static void search_free(struct closure *cl)
 	struct search *s = container_of(cl, struct search, cl);
 	bio_complete(s);
 
-	if (s->op.cache_bio)
-		bio_put(s->op.cache_bio);
+	if (s->cache_bio)
+		bio_put(s->cache_bio);
 
 	if (s->unaligned_bvec)
 		mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);
@@ -813,14 +813,14 @@ static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
 
 	__closure_init(&s->cl, NULL);
 
-	s->op.inode = d->id;
-	s->op.c = d->c;
+	s->inode = d->id;
+	s->c = d->c;
 	s->d = d;
 	s->op.lock = -1;
 	s->task = current;
 	s->orig_bio = bio;
 	s->write = (bio->bi_rw & REQ_WRITE) != 0;
-	s->op.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
+	s->flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
 	s->recoverable = 1;
 	s->start_time = jiffies;
 	do_bio_hook(s);
@@ -891,7 +891,7 @@ static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
 
 static bool check_should_bypass(struct cached_dev *dc, struct search *s)
 {
-	struct cache_set *c = s->op.c;
+	struct cache_set *c = s->c;
 	struct bio *bio = &s->bio.bio;
 	unsigned mode = cache_mode(dc, bio);
 	unsigned sectors, congested = bch_get_congested(c);
@@ -985,11 +985,11 @@ static void cached_dev_cache_miss_done(struct closure *cl)
 	if (s->op.insert_collision)
 		bch_mark_cache_miss_collision(s);
 
-	if (s->op.cache_bio) {
+	if (s->cache_bio) {
 		int i;
 		struct bio_vec *bv;
 
-		__bio_for_each_segment(bv, s->op.cache_bio, i, 0)
+		bio_for_each_segment_all(bv, s->cache_bio, i)
 			__free_page(bv->bv_page);
 	}
 
@@ -1042,14 +1042,15 @@ static void cached_dev_read_done(struct closure *cl)
 	 * to the buffers the original bio pointed to:
 	 */
 
-	if (s->op.cache_bio) {
-		bio_reset(s->op.cache_bio);
-		s->op.cache_bio->bi_sector = s->cache_miss->bi_sector;
-		s->op.cache_bio->bi_bdev = s->cache_miss->bi_bdev;
-		s->op.cache_bio->bi_size = s->cache_bio_sectors << 9;
-		bch_bio_map(s->op.cache_bio, NULL);
+	if (s->cache_bio) {
+		bio_reset(s->cache_bio);
+		s->cache_bio->bi_sector =
+			s->cache_miss->bi_sector;
+		s->cache_bio->bi_bdev = s->cache_miss->bi_bdev;
+		s->cache_bio->bi_size = s->cache_bio_sectors << 9;
+		bch_bio_map(s->cache_bio, NULL);
 
-		bio_copy_data(s->cache_miss, s->op.cache_bio);
+		bio_copy_data(s->cache_miss, s->cache_bio);
 
 		bio_put(s->cache_miss);
 		s->cache_miss = NULL;
@@ -1060,8 +1061,8 @@ static void cached_dev_read_done(struct closure *cl)
 
 	bio_complete(s);
 
-	if (s->op.cache_bio &&
-	    !test_bit(CACHE_SET_STOPPING, &s->op.c->flags)) {
+	if (s->cache_bio &&
+	    !test_bit(CACHE_SET_STOPPING, &s->c->flags)) {
 		s->op.type = BTREE_REPLACE;
 		closure_call(&s->op.cl, bch_data_insert, NULL, cl);
 	}
@@ -1074,12 +1075,12 @@ static void cached_dev_read_done_bh(struct closure *cl)
 	struct search *s = container_of(cl, struct search, cl);
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
-	bch_mark_cache_accounting(s, !s->cache_miss, s->op.bypass);
-	trace_bcache_read(s->orig_bio, !s->cache_miss, s->op.bypass);
+	bch_mark_cache_accounting(s, !s->cache_miss, s->bypass);
+	trace_bcache_read(s->orig_bio, !s->cache_miss, s->bypass);
 
 	if (s->error)
 		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
-	else if (s->op.cache_bio || verify(dc, &s->bio.bio))
+	else if (s->cache_bio || verify(dc, &s->bio.bio))
 		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
 	else
 		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
@@ -1093,7 +1094,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 	struct bio *miss, *cache_bio;
 
-	if (s->cache_miss || s->op.bypass) {
+	if (s->cache_miss || s->bypass) {
 		miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
 		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
 		goto out_submit;
@@ -1101,13 +1102,13 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 
 	if (!(bio->bi_rw & REQ_RAHEAD) &&
	    !(bio->bi_rw & REQ_META) &&
-	    s->op.c->gc_stats.in_use < CUTOFF_CACHE_READA)
+	    s->c->gc_stats.in_use < CUTOFF_CACHE_READA)
 		reada = min_t(sector_t, dc->readahead >> 9,
			      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));
 
 	s->cache_bio_sectors = min(sectors, bio_sectors(bio) + reada);
 
-	s->op.replace = KEY(s->op.inode, bio->bi_sector +
+	s->op.replace = KEY(s->inode, bio->bi_sector +
			    s->cache_bio_sectors, s->cache_bio_sectors);
 
 	ret = bch_btree_insert_check_key(b, &s->op, &s->op.replace);
@@ -1137,7 +1138,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 		goto out_put;
 
 	s->cache_miss = miss;
-	s->op.cache_bio = cache_bio;
+	s->cache_bio = cache_bio;
 	bio_get(cache_bio);
 	closure_bio_submit(cache_bio, &s->cl, s->d);
 
@@ -1177,7 +1178,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 	struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0);
 	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
 
-	bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end);
+	bch_keybuf_check_overlapping(&s->c->moving_gc_keys, &start, &end);
 
 	down_read_non_owner(&dc->writeback_lock);
 	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
@@ -1185,7 +1186,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 		 * We overlap with some dirty data undergoing background
 		 * writeback, force this write to writeback
 		 */
-		s->op.bypass = false;
+		s->bypass = false;
 		s->writeback = true;
 	}
 
@@ -1197,27 +1198,27 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 	 * so we still want to call it.
 	 */
 	if (bio->bi_rw & REQ_DISCARD)
-		s->op.bypass = true;
+		s->bypass = true;
 
 	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc, bio),
-			     s->op.bypass)) {
-		s->op.bypass = false;
+			     s->bypass)) {
+		s->bypass = false;
 		s->writeback = true;
 	}
 
-	trace_bcache_write(s->orig_bio, s->writeback, s->op.bypass);
+	trace_bcache_write(s->orig_bio, s->writeback, s->bypass);
 
-	if (s->op.bypass) {
-		s->op.cache_bio = s->orig_bio;
-		bio_get(s->op.cache_bio);
+	if (s->bypass) {
+		s->cache_bio = s->orig_bio;
+		bio_get(s->cache_bio);
 
 		if (!(bio->bi_rw & REQ_DISCARD) ||
		    blk_queue_discard(bdev_get_queue(dc->bdev)))
 			closure_bio_submit(bio, cl, s->d);
 	} else if (s->writeback) {
 		bch_writeback_add(dc);
-		s->op.cache_bio = bio;
+		s->cache_bio = bio;
 
 		if (bio->bi_rw & REQ_FLUSH) {
 			/* Also need to send a flush to the backing device */
@@ -1232,8 +1233,8 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 			closure_bio_submit(flush, cl, s->d);
 		}
 	} else {
-		s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
-						   dc->disk.bio_split);
+		s->cache_bio = bio_clone_bioset(bio, GFP_NOIO,
						dc->disk.bio_split);
 
 		closure_bio_submit(bio, cl, s->d);
 	}
@@ -1247,8 +1248,8 @@ static void cached_dev_nodata(struct closure *cl)
 	struct search *s = container_of(cl, struct search, cl);
 	struct bio *bio = &s->bio.bio;
 
-	if (s->op.flush_journal)
-		bch_journal_meta(s->op.c, cl);
+	if (s->flush_journal)
+		bch_journal_meta(s->c, cl);
 
 	/* If it's a flush, we send the flush to the backing device too */
 	closure_bio_submit(bio, cl, s->d);
@@ -1286,7 +1287,7 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
					      cached_dev_nodata,
					      bcache_wq);
 		} else {
-			s->op.bypass = check_should_bypass(dc, s);
+			s->bypass = check_should_bypass(dc, s);
 
 			if (rw)
 				cached_dev_write(dc, s);
@@ -1376,8 +1377,8 @@ static void flash_dev_nodata(struct closure *cl)
 {
 	struct search *s = container_of(cl, struct search, cl);
 
-	if (s->op.flush_journal)
-		bch_journal_meta(s->op.c, cl);
+	if (s->flush_journal)
+		bch_journal_meta(s->c, cl);
 
 	continue_at(cl, search_free, NULL);
 }
@@ -1409,13 +1410,13 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
					      flash_dev_nodata,
					      bcache_wq);
 	} else if (rw) {
-		bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys,
+		bch_keybuf_check_overlapping(&s->c->moving_gc_keys,
					     &KEY(d->id, bio->bi_sector, 0),
					     &KEY(d->id, bio_end_sector(bio), 0));
 
-		s->op.bypass = (bio->bi_rw & REQ_DISCARD) != 0;
+		s->bypass = (bio->bi_rw & REQ_DISCARD) != 0;
 		s->writeback = true;
-		s->op.cache_bio = bio;
+		s->cache_bio = bio;
 
 		closure_call(&s->op.cl, bch_data_insert, NULL, cl);
 	} else {
@@ -8,19 +8,33 @@ struct search {
 	struct closure cl;
 
 	struct bcache_device *d;
+	struct cache_set *c;
 	struct task_struct *task;
 
 	struct bbio bio;
 	struct bio *orig_bio;
 	struct bio *cache_miss;
+
+	/* Bio to be inserted into the cache */
+	struct bio *cache_bio;
 	unsigned cache_bio_sectors;
 
+	unsigned inode;
+
 	unsigned recoverable:1;
 	unsigned unaligned_bvec:1;
 
 	unsigned write:1;
 	unsigned writeback:1;
 
+	unsigned csum:1;
+	unsigned bypass:1;
+	unsigned flush_journal:1;
+
+	unsigned insert_data_done:1;
+
+	uint16_t write_prio;
+
 	/* IO error returned to s->bio */
 	short error;
 	unsigned long start_time;
@@ -200,7 +200,7 @@ void bch_mark_cache_accounting(struct search *s, bool hit, bool bypass)
 {
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 	mark_cache_stats(&dc->accounting.collector, hit, bypass);
-	mark_cache_stats(&s->op.c->accounting.collector, hit, bypass);
+	mark_cache_stats(&s->c->accounting.collector, hit, bypass);
#ifdef CONFIG_CGROUP_BCACHE
 	mark_cache_stats(&(bch_bio_to_cgroup(s->orig_bio)->stats), hit, bypass);
#endif
@@ -210,21 +210,21 @@ void bch_mark_cache_readahead(struct search *s)
 {
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 	atomic_inc(&dc->accounting.collector.cache_readaheads);
-	atomic_inc(&s->op.c->accounting.collector.cache_readaheads);
+	atomic_inc(&s->c->accounting.collector.cache_readaheads);
 }
 
 void bch_mark_cache_miss_collision(struct search *s)
 {
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 	atomic_inc(&dc->accounting.collector.cache_miss_collisions);
-	atomic_inc(&s->op.c->accounting.collector.cache_miss_collisions);
+	atomic_inc(&s->c->accounting.collector.cache_miss_collisions);
 }
 
 void bch_mark_sectors_bypassed(struct search *s, int sectors)
 {
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 	atomic_add(sectors, &dc->accounting.collector.sectors_bypassed);
-	atomic_add(sectors, &s->op.c->accounting.collector.sectors_bypassed);
+	atomic_add(sectors, &s->c->accounting.collector.sectors_bypassed);
 }
 
 void bch_cache_accounting_init(struct cache_accounting *acc,
@@ -1493,11 +1493,10 @@ static void run_cache_set(struct cache_set *c)
 	const char *err = "cannot allocate memory";
 	struct cached_dev *dc, *t;
 	struct cache *ca;
+	struct closure cl;
 	unsigned i;
 
-	struct btree_op op;
-	bch_btree_op_init_stack(&op);
-	op.lock = SHRT_MAX;
+	closure_init_stack(&cl);
 
 	for_each_cache(ca, c, i)
 		c->nbuckets += ca->sb.nbuckets;
@@ -1508,7 +1507,7 @@ static void run_cache_set(struct cache_set *c)
 		struct jset *j;
 
 		err = "cannot allocate memory for journal";
-		if (bch_journal_read(c, &journal, &op))
+		if (bch_journal_read(c, &journal))
 			goto err;
 
 		pr_debug("btree_journal_read() done");
@@ -1543,12 +1542,12 @@ static void run_cache_set(struct cache_set *c)
 		list_del_init(&c->root->list);
 		rw_unlock(true, c->root);
 
-		err = uuid_read(c, j, &op.cl);
+		err = uuid_read(c, j, &cl);
 		if (err)
 			goto err;
 
 		err = "error in recovery";
-		if (bch_btree_check(c, &op))
+		if (bch_btree_check(c))
 			goto err;
 
 		bch_journal_mark(c, &journal);
@@ -1580,7 +1579,7 @@ static void run_cache_set(struct cache_set *c)
 		if (j->version < BCACHE_JSET_VERSION_UUID)
 			__uuid_write(c);
 
-		bch_journal_replay(c, &journal, &op);
+		bch_journal_replay(c, &journal);
 	} else {
 		pr_notice("invalidating existing data");
 
@@ -1616,7 +1615,7 @@ static void run_cache_set(struct cache_set *c)
 			goto err;
 
 		bkey_copy_key(&c->root->key, &MAX_KEY);
-		bch_btree_node_write(c->root, &op.cl);
+		bch_btree_node_write(c->root, &cl);
 
 		bch_btree_set_root(c->root);
 		rw_unlock(true, c->root);
@@ -1629,14 +1628,14 @@ static void run_cache_set(struct cache_set *c)
 		SET_CACHE_SYNC(&c->sb, true);
 
 		bch_journal_next(&c->journal);
-		bch_journal_meta(c, &op.cl);
+		bch_journal_meta(c, &cl);
 	}
 
 	err = "error starting gc thread";
 	if (bch_gc_thread_start(c))
 		goto err;
 
-	closure_sync(&op.cl);
+	closure_sync(&cl);
 	c->sb.last_mount = get_seconds();
 	bcache_write_super(c);
 
@@ -1647,7 +1646,7 @@ static void run_cache_set(struct cache_set *c)
 
 	return;
err:
-	closure_sync(&op.cl);
+	closure_sync(&cl);
 	/* XXX: test this, it's broken */
 	bch_cache_set_error(c, err);
 }
@@ -155,7 +155,7 @@ static void write_dirty_finish(struct closure *cl)
 		for (i = 0; i < KEY_PTRS(&w->key); i++)
 			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
 
-		bch_btree_insert(&op, dc->disk.c, &keys);
+		bch_btree_insert(&op, dc->disk.c, &keys, NULL);
 		closure_sync(&op.cl);
 
 		if (op.insert_collision)
@@ -433,9 +433,16 @@ static int bch_writeback_thread(void *arg)
 
/* Init */
 
-static int sectors_dirty_init_fn(struct btree_op *op, struct btree *b,
+struct sectors_dirty_init {
+	struct btree_op op;
+	unsigned inode;
+};
+
+static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
				 struct bkey *k)
 {
+	struct sectors_dirty_init *op = container_of(_op,
					struct sectors_dirty_init, op);
 	if (KEY_INODE(k) > op->inode)
 		return MAP_DONE;
 
@@ -448,12 +455,12 @@ static int sectors_dirty_init_fn(struct btree_op *op, struct btree *b,
 
void bch_sectors_dirty_init(struct cached_dev *dc)
{
-	struct btree_op op;
+	struct sectors_dirty_init op;
 
-	bch_btree_op_init_stack(&op);
+	bch_btree_op_init_stack(&op.op);
 	op.inode = dc->disk.id;
 
-	bch_btree_map_keys(&op, dc->disk.c, &KEY(op.inode, 0, 0),
+	bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
			   sectors_dirty_init_fn, 0);
}
 
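The sectors_dirty_init change above is the general pattern that replaces the removed btree_op fields: a caller that needs extra per-operation state embeds the btree_op in its own struct and recovers it in the map callback with container_of(). A minimal sketch of the same idea with hypothetical names (my_op and my_init_fn are illustrative, not from the patch):

	struct my_op {
		struct btree_op op;	/* this is what bch_btree_map_keys() sees */
		unsigned inode;		/* caller-private state */
	};

	static int my_init_fn(struct btree_op *b_op, struct btree *b, struct bkey *k)
	{
		/* recover the wrapper from the embedded btree_op */
		struct my_op *op = container_of(b_op, struct my_op, op);

		if (KEY_INODE(k) > op->inode)
			return MAP_DONE;

		return MAP_CONTINUE;
	}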