Diffstat (limited to 'drivers/md/bcache/btree.h')
-rw-r--r--  drivers/md/bcache/btree.h | 195
1 file changed, 65 insertions, 130 deletions
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 3333d3723633..767e75570896 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -125,6 +125,7 @@ struct btree {
         unsigned long           seq;
         struct rw_semaphore     lock;
         struct cache_set        *c;
+        struct btree            *parent;
 
         unsigned long           flags;
         uint16_t                written;        /* would be nice to kill */
@@ -200,12 +201,7 @@ static inline bool bkey_written(struct btree *b, struct bkey *k)
 
 static inline void set_gc_sectors(struct cache_set *c)
 {
-        atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 8);
-}
-
-static inline bool bch_ptr_invalid(struct btree *b, const struct bkey *k)
-{
-        return __bch_ptr_invalid(b->c, b->level, k);
+        atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);
 }
 
 static inline struct bkey *bch_btree_iter_init(struct btree *b,
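Aside on the set_gc_sectors() change above: raising the divisor from 8 to 16 halves the number of written sectors needed before incremental GC is kicked. A worked example with made-up cache geometry:

/*
 * Illustration only, not from the patch: a cache with 1 << 20 buckets of
 * 1024 sectors each has 1024 * (1 << 20) = 1073741824 sectors in total.
 *
 *   old: sectors_to_gc starts at 1073741824 / 8  = 134217728
 *   new: sectors_to_gc starts at 1073741824 / 16 =  67108864
 *
 * so the counter reaches zero, and GC is woken, after half as many sectors
 * have been written to the cache.
 */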
@@ -215,6 +211,16 @@ static inline struct bkey *bch_btree_iter_init(struct btree *b,
         return __bch_btree_iter_init(b, iter, search, b->sets);
 }
 
+static inline bool bch_ptr_invalid(struct btree *b, const struct bkey *k)
+{
+        if (b->level)
+                return bch_btree_ptr_invalid(b->c, k);
+        else
+                return bch_extent_ptr_invalid(b->c, k);
+}
+
+void bkey_put(struct cache_set *c, struct bkey *k);
+
 /* Looping macros */
 
 #define for_each_cached_btree(b, c, iter)                               \
@@ -234,51 +240,17 @@ static inline struct bkey *bch_btree_iter_init(struct btree *b,
 /* Recursing down the btree */
 
 struct btree_op {
-        struct closure          cl;
-        struct cache_set        *c;
-
-        /* Journal entry we have a refcount on */
-        atomic_t                *journal;
-
-        /* Bio to be inserted into the cache */
-        struct bio              *cache_bio;
-
-        unsigned                inode;
-
-        uint16_t                write_prio;
-
         /* Btree level at which we start taking write locks */
         short                   lock;
 
-        /* Btree insertion type */
-        enum {
-                BTREE_INSERT,
-                BTREE_REPLACE
-        } type:8;
-
-        unsigned                csum:1;
-        unsigned                skip:1;
-        unsigned                flush_journal:1;
-
-        unsigned                insert_data_done:1;
-        unsigned                lookup_done:1;
         unsigned                insert_collision:1;
-
-        /* Anything after this point won't get zeroed in do_bio_hook() */
-
-        /* Keys to be inserted */
-        struct keylist          keys;
-        BKEY_PADDED(replace);
 };
 
-enum {
-        BTREE_INSERT_STATUS_INSERT,
-        BTREE_INSERT_STATUS_BACK_MERGE,
-        BTREE_INSERT_STATUS_OVERWROTE,
-        BTREE_INSERT_STATUS_FRONT_MERGE,
-};
-
-void bch_btree_op_init_stack(struct btree_op *);
+static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
+{
+        memset(op, 0, sizeof(struct btree_op));
+        op->lock = write_lock_level;
+}
 
 static inline void rw_lock(bool w, struct btree *b, int level)
 {
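With the closure, journal, bio and keylist state stripped out of struct btree_op in the hunk above, the op is now plain per-traversal state that callers keep on the stack and set up with bch_btree_op_init(). A minimal sketch of the intended usage, assuming the bcache headers; the wrapper struct, its field names and the lock level 0 are illustrative choices, not something this hunk prescribes:

#include "btree.h"

/* Hypothetical example: per-traversal state wrapping the slimmed-down op. */
struct example_op {
        struct btree_op         op;             /* zeroed, op.lock set by bch_btree_op_init() */
        unsigned                nodes_touched;  /* caller-private state lives outside btree_op now */
};

static void example_op_init(struct example_op *eop)
{
        /* 0 = start taking write locks at level 0 (the leaves); illustrative value */
        bch_btree_op_init(&eop->op, 0);
        eop->nodes_touched = 0;
}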
@@ -290,108 +262,71 @@ static inline void rw_lock(bool w, struct btree *b, int level)
 
 static inline void rw_unlock(bool w, struct btree *b)
 {
-#ifdef CONFIG_BCACHE_EDEBUG
-        unsigned i;
-
-        if (w && b->key.ptr[0])
-                for (i = 0; i <= b->nsets; i++)
-                        bch_check_key_order(b, b->sets[i].data);
-#endif
-
         if (w)
                 b->seq++;
         (w ? up_write : up_read)(&b->lock);
 }
 
-#define insert_lock(s, b)       ((b)->level <= (s)->lock)
+void bch_btree_node_read(struct btree *);
+void bch_btree_node_write(struct btree *, struct closure *);
 
-/*
- * These macros are for recursing down the btree - they handle the details of
- * locking and looking up nodes in the cache for you. They're best treated as
- * mere syntax when reading code that uses them.
- *
- * op->lock determines whether we take a read or a write lock at a given depth.
- * If you've got a read lock and find that you need a write lock (i.e. you're
- * going to have to split), set op->lock and return -EINTR; btree_root() will
- * call you again and you'll have the correct lock.
- */
+void bch_btree_set_root(struct btree *);
+struct btree *bch_btree_node_alloc(struct cache_set *, int, bool);
+struct btree *bch_btree_node_get(struct cache_set *, struct bkey *, int, bool);
 
-/**
- * btree - recurse down the btree on a specified key
- * @fn:         function to call, which will be passed the child node
- * @key:        key to recurse on
- * @b:          parent btree node
- * @op:         pointer to struct btree_op
- */
-#define btree(fn, key, b, op, ...)                                      \
-({                                                                      \
-        int _r, l = (b)->level - 1;                                     \
-        bool _w = l <= (op)->lock;                                      \
-        struct btree *_b = bch_btree_node_get((b)->c, key, l, op);      \
-        if (!IS_ERR(_b)) {                                              \
-                _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);           \
-                rw_unlock(_w, _b);                                      \
-        } else                                                          \
-                _r = PTR_ERR(_b);                                       \
-        _r;                                                             \
-})
-
-/**
- * btree_root - call a function on the root of the btree
- * @fn:         function to call, which will be passed the child node
- * @c:          cache set
- * @op:         pointer to struct btree_op
- */
-#define btree_root(fn, c, op, ...)                                      \
-({                                                                      \
-        int _r = -EINTR;                                                \
-        do {                                                            \
-                struct btree *_b = (c)->root;                           \
-                bool _w = insert_lock(op, _b);                          \
-                rw_lock(_w, _b, _b->level);                             \
-                if (_b == (c)->root &&                                  \
-                    _w == insert_lock(op, _b))                          \
-                        _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);   \
-                rw_unlock(_w, _b);                                      \
-                bch_cannibalize_unlock(c, &(op)->cl);                   \
-        } while (_r == -EINTR);                                         \
-                                                                        \
-        _r;                                                             \
-})
+int bch_btree_insert_check_key(struct btree *, struct btree_op *,
+                               struct bkey *);
+int bch_btree_insert(struct cache_set *, struct keylist *,
+                     atomic_t *, struct bkey *);
+
+int bch_gc_thread_start(struct cache_set *);
+size_t bch_btree_gc_finish(struct cache_set *);
+void bch_moving_gc(struct cache_set *);
+int bch_btree_check(struct cache_set *);
+uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *);
 
-static inline bool should_split(struct btree *b)
+static inline void wake_up_gc(struct cache_set *c)
 {
-        struct bset *i = write_block(b);
-        return b->written >= btree_blocks(b) ||
-               (i->seq == b->sets[0].data->seq &&
-                b->written + __set_blocks(i, i->keys + 15, b->c)
-                > btree_blocks(b));
+        if (c->gc_thread)
+                wake_up_process(c->gc_thread);
 }
 
-void bch_btree_node_read(struct btree *);
-void bch_btree_node_write(struct btree *, struct closure *);
+#define MAP_DONE        0
+#define MAP_CONTINUE    1
 
-void bch_cannibalize_unlock(struct cache_set *, struct closure *);
-void bch_btree_set_root(struct btree *);
-struct btree *bch_btree_node_alloc(struct cache_set *, int, struct closure *);
-struct btree *bch_btree_node_get(struct cache_set *, struct bkey *,
-                                 int, struct btree_op *);
+#define MAP_ALL_NODES   0
+#define MAP_LEAF_NODES  1
 
-bool bch_btree_insert_check_key(struct btree *, struct btree_op *,
-                                struct bio *);
-int bch_btree_insert(struct btree_op *, struct cache_set *);
+#define MAP_END_KEY     1
 
-int bch_btree_search_recurse(struct btree *, struct btree_op *);
+typedef int (btree_map_nodes_fn)(struct btree_op *, struct btree *);
+int __bch_btree_map_nodes(struct btree_op *, struct cache_set *,
+                          struct bkey *, btree_map_nodes_fn *, int);
 
-void bch_queue_gc(struct cache_set *);
-size_t bch_btree_gc_finish(struct cache_set *);
-void bch_moving_gc(struct closure *);
-int bch_btree_check(struct cache_set *, struct btree_op *);
-uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *);
+static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
+                                      struct bkey *from, btree_map_nodes_fn *fn)
+{
+        return __bch_btree_map_nodes(op, c, from, fn, MAP_ALL_NODES);
+}
+
+static inline int bch_btree_map_leaf_nodes(struct btree_op *op,
+                                           struct cache_set *c,
+                                           struct bkey *from,
+                                           btree_map_nodes_fn *fn)
+{
+        return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES);
+}
+
+typedef int (btree_map_keys_fn)(struct btree_op *, struct btree *,
+                                struct bkey *);
+int bch_btree_map_keys(struct btree_op *, struct cache_set *,
+                       struct bkey *, btree_map_keys_fn *, int);
+
+typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
 
 void bch_keybuf_init(struct keybuf *);
-void bch_refill_keybuf(struct cache_set *, struct keybuf *, struct bkey *,
-                        keybuf_pred_fn *);
+void bch_refill_keybuf(struct cache_set *, struct keybuf *,
+                       struct bkey *, keybuf_pred_fn *);
 bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *,
                                   struct bkey *);
 void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
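The removed btree()/btree_root() recursion macros give way to the callback-driven map API declared in the hunk above (__bch_btree_map_nodes(), bch_btree_map_keys(), and the MAP_* codes). A sketch of how a traversal might look against these declarations; the counting callback, the container_of() wrapper, the -1 lock level and the use of &ZERO_KEY as a starting key are assumptions for illustration, not part of the patch:

#include <linux/kernel.h>
#include "btree.h"

/* Hypothetical: count btree nodes by walking the tree with the new map API. */
struct count_op {
        struct btree_op         op;
        unsigned                nodes;
};

static int count_nodes_fn(struct btree_op *b_op, struct btree *b)
{
        struct count_op *op = container_of(b_op, struct count_op, op);

        op->nodes++;
        return MAP_CONTINUE;    /* returning MAP_DONE instead would end the walk early */
}

static unsigned count_btree_nodes(struct cache_set *c)
{
        struct count_op op = { .nodes = 0 };
        int ret;

        /* -1: never take write locks, i.e. a read-only walk (illustrative value) */
        bch_btree_op_init(&op.op, -1);

        ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, count_nodes_fn);
        if (ret < 0)
                return 0;       /* walk failed; a real caller would propagate the error */

        return op.nodes;
}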