author     Kent Overstreet <kmo@daterainc.com>    2013-09-10 21:48:51 -0400
committer  Kent Overstreet <kmo@daterainc.com>    2013-11-11 00:56:06 -0500
commit     48dad8baf92fe8967d9e1358af1cfdda1d2d3298 (patch)
tree       c773b028b0123acb07b55690a3cfb1f3ea230587 /drivers/md/bcache/btree.c
parent     5e6926daac267dd99552ae613f041a9e88bbf258 (diff)
bcache: Add btree_map() functions
Lots of stuff has been open coding its own btree traversal - which is
generally pretty simple code, but there are a few subtleties.
This adds two new functions, bch_btree_map_nodes() and
bch_btree_map_keys(), which do the traversal for you. Everything that's
open coding btree traversal now (with the exception of garbage
collection) is slowly going to be converted to these two functions;
being able to write other code at a higher level of abstraction is a
big improvement w.r.t. overall code quality.
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
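As an illustration only (not part of the patch): the sketch below shows how a caller is expected to drive the new bch_btree_map_keys() interface, modeled on the refill_keybuf_fn() conversion further down in this diff. The callback embeds its state around a struct btree_op and recovers it with container_of(), exactly as struct refill does below. The struct count_op / count_keys_fn() / count_keys() names are hypothetical, the btree_map_keys_fn typedef is declared in btree.h rather than in this file, and passing a NULL start key to mean "walk from the beginning" is an assumption based on how bch_btree_iter_init() treats a NULL search key.

struct count_op {
	struct btree_op	op;
	u64		nr_keys;
};

static int count_keys_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct count_op *c = container_of(op, struct count_op, op);

	c->nr_keys++;
	return MAP_CONTINUE;	/* MAP_DONE would end the traversal early */
}

static u64 count_keys(struct cache_set *c)
{
	struct count_op count;

	count.nr_keys = 0;
	bch_btree_op_init_stack(&count.op);

	/* NULL start key (assumed): visit every good leaf key in the btree */
	bch_btree_map_keys(&count.op, c, NULL, count_keys_fn, 0);

	return count.nr_keys;
}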
Diffstat (limited to 'drivers/md/bcache/btree.c')
-rw-r--r--   drivers/md/bcache/btree.c | 174
1 file changed, 123 insertions(+), 51 deletions(-)
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 17bfd87fc8f4..cfbdcf349b7f 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -2296,6 +2296,82 @@ int bch_btree_search_recurse(struct btree *b, struct btree_op *op)
 	return ret;
 }
 
+/* Map across nodes or keys */
+
+static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
+				       struct bkey *from,
+				       btree_map_nodes_fn *fn, int flags)
+{
+	int ret = MAP_CONTINUE;
+
+	if (b->level) {
+		struct bkey *k;
+		struct btree_iter iter;
+
+		bch_btree_iter_init(b, &iter, from);
+
+		while ((k = bch_btree_iter_next_filter(&iter, b,
+						       bch_ptr_bad))) {
+			ret = btree(map_nodes_recurse, k, b,
+				    op, from, fn, flags);
+			from = NULL;
+
+			if (ret != MAP_CONTINUE)
+				return ret;
+		}
+	}
+
+	if (!b->level || flags == MAP_ALL_NODES)
+		ret = fn(op, b);
+
+	return ret;
+}
+
+int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
+			  struct bkey *from, btree_map_nodes_fn *fn, int flags)
+{
+	int ret = btree_root(map_nodes_recurse, c, op, from, fn, flags);
+	if (closure_blocking(&op->cl))
+		closure_sync(&op->cl);
+	return ret;
+}
+
+static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
+				      struct bkey *from, btree_map_keys_fn *fn,
+				      int flags)
+{
+	int ret = MAP_CONTINUE;
+	struct bkey *k;
+	struct btree_iter iter;
+
+	bch_btree_iter_init(b, &iter, from);
+
+	while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad))) {
+		ret = !b->level
+			? fn(op, b, k)
+			: btree(map_keys_recurse, k, b, op, from, fn, flags);
+		from = NULL;
+
+		if (ret != MAP_CONTINUE)
+			return ret;
+	}
+
+	if (!b->level && (flags & MAP_END_KEY))
+		ret = fn(op, b, &KEY(KEY_INODE(&b->key),
+				     KEY_OFFSET(&b->key), 0));
+
+	return ret;
+}
+
+int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
+		       struct bkey *from, btree_map_keys_fn *fn, int flags)
+{
+	int ret = btree_root(map_keys_recurse, c, op, from, fn, flags);
+	if (closure_blocking(&op->cl))
+		closure_sync(&op->cl);
+	return ret;
+}
+
 /* Keybuf code */
 
 static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
@@ -2314,74 +2390,70 @@ static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
 	return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
 }
 
-static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op,
-				   struct keybuf *buf, struct bkey *end,
-				   keybuf_pred_fn *pred)
-{
-	struct btree_iter iter;
-	bch_btree_iter_init(b, &iter, &buf->last_scanned);
-
-	while (!array_freelist_empty(&buf->freelist)) {
-		struct bkey *k = bch_btree_iter_next_filter(&iter, b,
-							    bch_ptr_bad);
-
-		if (!b->level) {
-			if (!k) {
-				buf->last_scanned = b->key;
-				break;
-			}
+struct refill {
+	struct btree_op	op;
+	struct keybuf	*buf;
+	struct bkey	*end;
+	keybuf_pred_fn	*pred;
+};
 
-			buf->last_scanned = *k;
-			if (bkey_cmp(&buf->last_scanned, end) >= 0)
-				break;
+static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
+			    struct bkey *k)
+{
+	struct refill *refill = container_of(op, struct refill, op);
+	struct keybuf *buf = refill->buf;
+	int ret = MAP_CONTINUE;
 
-			if (pred(buf, k)) {
-				struct keybuf_key *w;
+	if (bkey_cmp(k, refill->end) >= 0) {
+		ret = MAP_DONE;
+		goto out;
+	}
 
-				spin_lock(&buf->lock);
+	if (!KEY_SIZE(k)) /* end key */
+		goto out;
 
-				w = array_alloc(&buf->freelist);
+	if (refill->pred(buf, k)) {
+		struct keybuf_key *w;
 
-				w->private = NULL;
-				bkey_copy(&w->key, k);
+		spin_lock(&buf->lock);
 
-				if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
-					array_free(&buf->freelist, w);
+		w = array_alloc(&buf->freelist);
+		if (!w) {
+			spin_unlock(&buf->lock);
+			return MAP_DONE;
+		}
 
-				spin_unlock(&buf->lock);
-			}
-		} else {
-			if (!k)
-				break;
+		w->private = NULL;
+		bkey_copy(&w->key, k);
 
-			btree(refill_keybuf, k, b, op, buf, end, pred);
-			/*
-			 * Might get an error here, but can't really do anything
-			 * and it'll get logged elsewhere. Just read what we
-			 * can.
-			 */
+		if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
+			array_free(&buf->freelist, w);
 
-			if (bkey_cmp(&buf->last_scanned, end) >= 0)
-				break;
+		if (array_freelist_empty(&buf->freelist))
+			ret = MAP_DONE;
 
-			cond_resched();
-		}
+		spin_unlock(&buf->lock);
 	}
-
-	return 0;
+out:
+	buf->last_scanned = *k;
+	return ret;
 }
 
 void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
 		       struct bkey *end, keybuf_pred_fn *pred)
 {
 	struct bkey start = buf->last_scanned;
-	struct btree_op op;
-	bch_btree_op_init_stack(&op);
+	struct refill refill;
 
 	cond_resched();
 
-	btree_root(refill_keybuf, c, &op, buf, end, pred);
-	closure_sync(&op.cl);
+	bch_btree_op_init_stack(&refill.op);
+	refill.buf = buf;
+	refill.end = end;
+	refill.pred = pred;
+
+	bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
+			   refill_keybuf_fn, MAP_END_KEY);
 
 	pr_debug("found %s keys from %llu:%llu to %llu:%llu",
 		 RB_EMPTY_ROOT(&buf->keys) ? "no" :
@@ -2465,9 +2537,9 @@ struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
 }
 
 struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
-					   struct keybuf *buf,
-					   struct bkey *end,
-					   keybuf_pred_fn *pred)
+					  struct keybuf *buf,
+					  struct bkey *end,
+					  keybuf_pred_fn *pred)
 {
 	struct keybuf_key *ret;
 