Diffstat (limited to 'drivers/md/bcache/btree.h')
-rw-r--r--	drivers/md/bcache/btree.h	35
1 file changed, 16 insertions, 19 deletions
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index af4a7092a28c..3333d3723633 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -102,7 +102,6 @@
 #include "debug.h"
 
 struct btree_write {
-	struct closure *owner;
 	atomic_t *journal;
 
 	/* If btree_split() frees a btree node, it writes a new pointer to that
@@ -142,16 +141,12 @@ struct btree {
 	 */
 	struct bset_tree sets[MAX_BSETS];
 
-	/* Used to refcount bio splits, also protects b->bio */
+	/* For outstanding btree writes, used as a lock - protects write_idx */
 	struct closure_with_waitlist io;
 
-	/* Gets transferred to w->prio_blocked - see the comment there */
-	int prio_blocked;
-
 	struct list_head list;
 	struct delayed_work work;
 
-	uint64_t io_start_time;
 	struct btree_write writes[2];
 	struct bio *bio;
 };
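
The io closure's new role pairs with the writes[2] double buffer: while one
btree_write may still be in flight, a write_idx flag (generated by
BTREE_FLAG(write_idx) in the next hunk) selects the entry that accumulates the
next write. A minimal sketch of the accessors this layout implies; the helper
names are assumptions, not shown in this hunk:

    /* Sketch: pick the accumulating vs. in-flight write for a node.
     * btree_current_write()/btree_prev_write() are assumed helpers
     * consistent with the "protects write_idx" comment above. */
    static inline struct btree_write *btree_current_write(struct btree *b)
    {
    	return b->writes + btree_node_write_idx(b);
    }

    static inline struct btree_write *btree_prev_write(struct btree *b)
    {
    	return b->writes + (btree_node_write_idx(b) ^ 1);
    }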
@@ -164,13 +159,11 @@ static inline void set_btree_node_ ## flag(struct btree *b) \
 { set_bit(BTREE_NODE_ ## flag, &b->flags); }			\
 
 enum btree_flags {
-	BTREE_NODE_read_done,
 	BTREE_NODE_io_error,
 	BTREE_NODE_dirty,
 	BTREE_NODE_write_idx,
 };
 
-BTREE_FLAG(read_done);
 BTREE_FLAG(io_error);
 BTREE_FLAG(dirty);
 BTREE_FLAG(write_idx);
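
For readers following the flag removal: BTREE_FLAG(flag) generates test/set
accessors from the enum, so dropping BTREE_NODE_read_done also drops
btree_node_read_done() and set_btree_node_read_done(). A sketch of the full
macro; the hunk context only shows the set half, so the test half here is an
assumption consistent with it:

    #define BTREE_FLAG(flag)					\
    static inline bool btree_node_ ## flag(struct btree *b)	\
    {	return test_bit(BTREE_NODE_ ## flag, &b->flags); }	\
    								\
    static inline void set_btree_node_ ## flag(struct btree *b)	\
    {	set_bit(BTREE_NODE_ ## flag, &b->flags); }		\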
@@ -278,6 +271,13 @@ struct btree_op {
 	BKEY_PADDED(replace);
 };
 
+enum {
+	BTREE_INSERT_STATUS_INSERT,
+	BTREE_INSERT_STATUS_BACK_MERGE,
+	BTREE_INSERT_STATUS_OVERWROTE,
+	BTREE_INSERT_STATUS_FRONT_MERGE,
+};
+
 void bch_btree_op_init_stack(struct btree_op *);
 
 static inline void rw_lock(bool w, struct btree *b, int level)
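
The new anonymous enum classifies how a key landed in a node, presumably so
insert paths can count merges separately from plain inserts and overwrites. A
hedged sketch of a consumer; the function and its callers are illustrative,
not from this patch:

    /* Illustrative only: how an insert path might classify its result. */
    static unsigned example_insert_status(bool back_merge, bool front_merge,
    				      bool overwrote)
    {
    	if (back_merge)
    		return BTREE_INSERT_STATUS_BACK_MERGE;
    	if (front_merge)
    		return BTREE_INSERT_STATUS_FRONT_MERGE;
    	if (overwrote)
    		return BTREE_INSERT_STATUS_OVERWROTE;
    	return BTREE_INSERT_STATUS_INSERT;
    }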
@@ -293,9 +293,7 @@ static inline void rw_unlock(bool w, struct btree *b)
 #ifdef CONFIG_BCACHE_EDEBUG
 	unsigned i;
 
-	if (w &&
-	    b->key.ptr[0] &&
-	    btree_node_read_done(b))
+	if (w && b->key.ptr[0])
 		for (i = 0; i <= b->nsets; i++)
 			bch_check_key_order(b, b->sets[i].data);
 #endif
@@ -370,9 +368,8 @@ static inline bool should_split(struct btree *b)
 		> btree_blocks(b));
 }
 
-void bch_btree_read_done(struct closure *);
-void bch_btree_read(struct btree *);
-void bch_btree_write(struct btree *b, bool now, struct btree_op *op);
+void bch_btree_node_read(struct btree *);
+void bch_btree_node_write(struct btree *, struct closure *);
 
 void bch_cannibalize_unlock(struct cache_set *, struct closure *);
 void bch_btree_set_root(struct btree *);
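
The I/O entry points are renamed and narrowed: bch_btree_node_read() now
takes only the node (reads complete before it returns, which also explains
why rw_unlock() above no longer needs btree_node_read_done()), and
bch_btree_node_write() takes a closure the caller can wait on instead of the
old (now, op) pair. A hedged caller sketch; the function is illustrative and
assumes the caller holds the node locked:

    /* Sketch of a caller on the new API: write out a dirty node and
     * wait for the write to complete. */
    static void example_flush_node(struct btree *b)
    {
    	struct closure cl;

    	closure_init_stack(&cl);

    	if (btree_node_dirty(b))
    		bch_btree_node_write(b, &cl);	/* kick off the write */

    	closure_sync(&cl);			/* wait for completion */
    }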
@@ -380,7 +377,6 @@ struct btree *bch_btree_node_alloc(struct cache_set *, int, struct closure *);
 struct btree *bch_btree_node_get(struct cache_set *, struct bkey *,
 				int, struct btree_op *);
 
-bool bch_btree_insert_keys(struct btree *, struct btree_op *);
 bool bch_btree_insert_check_key(struct btree *, struct btree_op *,
 				struct bio *);
 int bch_btree_insert(struct btree_op *, struct cache_set *);
@@ -393,13 +389,14 @@ void bch_moving_gc(struct closure *);
 int bch_btree_check(struct cache_set *, struct btree_op *);
 uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *);
 
-void bch_keybuf_init(struct keybuf *, keybuf_pred_fn *);
-void bch_refill_keybuf(struct cache_set *, struct keybuf *, struct bkey *);
+void bch_keybuf_init(struct keybuf *);
+void bch_refill_keybuf(struct cache_set *, struct keybuf *, struct bkey *,
+		       keybuf_pred_fn *);
 bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *,
 				struct bkey *);
 void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
 struct keybuf_key *bch_keybuf_next(struct keybuf *);
-struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *,
-					struct keybuf *, struct bkey *);
+struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *, struct keybuf *,
+					struct bkey *, keybuf_pred_fn *);
 
 #endif
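
The keybuf predicate moves from init time to refill/rescan time, so a single
keybuf can be refilled with different filters over its lifetime. A hedged
usage sketch; the predicate, its forward declaration, and the end key are
illustrative, not from this patch:

    /* Illustrative usage of the relocated predicate. */
    static bool dirty_pred(struct keybuf *buf, struct bkey *k);	/* assumed */

    static void example_scan(struct cache_set *c, struct keybuf *buf)
    {
    	bch_keybuf_init(buf);				/* no pred at init */
    	bch_refill_keybuf(c, buf, &MAX_KEY, dirty_pred);/* pred per refill */
    }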
