path: root/drivers/md/bcache
author		Kent Overstreet <kmo@daterainc.com>	2013-09-10 21:48:51 -0400
committer	Kent Overstreet <kmo@daterainc.com>	2013-11-11 00:56:06 -0500
commit		48dad8baf92fe8967d9e1358af1cfdda1d2d3298 (patch)
tree		c773b028b0123acb07b55690a3cfb1f3ea230587 /drivers/md/bcache
parent		5e6926daac267dd99552ae613f041a9e88bbf258 (diff)
bcache: Add btree_map() functions
Lots of stuff has been open coding its own btree traversal - which is
generally pretty simple code, but there are a few subtleties.

This adds two new functions, bch_btree_map_nodes() and
bch_btree_map_keys(), which do the traversal for you. Everything that's
open coding btree traversal now (with the exception of garbage
collection) is slowly going to be converted to these two functions;
being able to write other code at a higher level of abstraction is a
big improvement w.r.t. overall code quality.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
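For orientation, here is a minimal sketch of how a caller might use the new interface. The count_op/count_keys_fn names and the counting logic are hypothetical illustrations, not part of this patch, but they follow the same pattern the patch applies to bset_stats and the keybuf refill below: embed the btree_op in a state struct and recover it in the callback with container_of().

/* Hypothetical example: count leaf-level keys with bch_btree_map_keys().
 * Per-traversal state travels in a struct that embeds the btree_op
 * handed to the callback. */
struct count_op {
	struct btree_op	op;
	size_t		keys;
};

static int count_keys_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct count_op *c = container_of(op, struct count_op, op);

	c->keys++;
	return MAP_CONTINUE;	/* keep walking; MAP_DONE would stop early */
}

/* Caller: initialize the embedded op, then walk from the zero key. */
static size_t count_all_keys(struct cache_set *set)
{
	struct count_op c = { .keys = 0 };

	bch_btree_op_init_stack(&c.op);
	bch_btree_map_keys(&c.op, set, &ZERO_KEY, count_keys_fn, 0);
	return c.keys;
}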
Diffstat (limited to 'drivers/md/bcache')
-rw-r--r--	drivers/md/bcache/bcache.h	2
-rw-r--r--	drivers/md/bcache/bset.c	33
-rw-r--r--	drivers/md/bcache/btree.c	174
-rw-r--r--	drivers/md/bcache/btree.h	37
-rw-r--r--	drivers/md/bcache/writeback.c	37
5 files changed, 186 insertions, 97 deletions
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 674e2f42e778..20fe96c121d9 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -384,8 +384,6 @@ struct keybuf_key {
 	void *private;
 };
 
-typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
-
 struct keybuf {
 	struct bkey last_scanned;
 	spinlock_t lock;
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index d0512e451dda..14c2a23d3884 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -842,6 +842,13 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
 
 /* Btree iterator */
 
+/*
+ * Returns true if l > r - unless l == r, in which case returns true if l is
+ * older than r.
+ *
+ * Necessary for btree_sort_fixup() - if there are multiple keys that compare
+ * equal in different sets, we have to process them newest to oldest.
+ */
 static inline bool btree_iter_cmp(struct btree_iter_set l,
 				  struct btree_iter_set r)
 {
@@ -1146,16 +1153,16 @@ out:
 /* Sysfs stuff */
 
 struct bset_stats {
+	struct btree_op op;
 	size_t nodes;
 	size_t sets_written, sets_unwritten;
 	size_t bytes_written, bytes_unwritten;
 	size_t floats, failed;
 };
 
-static int bch_btree_bset_stats(struct btree *b, struct btree_op *op,
-				struct bset_stats *stats)
+static int btree_bset_stats(struct btree_op *op, struct btree *b)
 {
-	struct bkey *k;
+	struct bset_stats *stats = container_of(op, struct bset_stats, op);
 	unsigned i;
 
 	stats->nodes++;
@@ -1180,30 +1187,20 @@ static int bch_btree_bset_stats(struct btree *b, struct btree_op *op,
 		}
 	}
 
-	if (b->level) {
-		struct btree_iter iter;
-
-		for_each_key_filter(b, k, &iter, bch_ptr_bad) {
-			int ret = btree(bset_stats, k, b, op, stats);
-			if (ret)
-				return ret;
-		}
-	}
-
-	return 0;
+	return MAP_CONTINUE;
 }
 
 int bch_bset_print_stats(struct cache_set *c, char *buf)
 {
-	struct btree_op op;
 	struct bset_stats t;
 	int ret;
 
-	bch_btree_op_init_stack(&op);
 	memset(&t, 0, sizeof(struct bset_stats));
+	bch_btree_op_init_stack(&t.op);
+	t.op.c = c;
 
-	ret = btree_root(bset_stats, c, &op, &t);
-	if (ret)
+	ret = bch_btree_map_nodes(&t.op, c, &ZERO_KEY, btree_bset_stats);
+	if (ret < 0)
 		return ret;
 
 	return snprintf(buf, PAGE_SIZE,
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 17bfd87fc8f4..cfbdcf349b7f 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -2296,6 +2296,82 @@ int bch_btree_search_recurse(struct btree *b, struct btree_op *op)
 	return ret;
 }
 
+/* Map across nodes or keys */
+
+static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
+				       struct bkey *from,
+				       btree_map_nodes_fn *fn, int flags)
+{
+	int ret = MAP_CONTINUE;
+
+	if (b->level) {
+		struct bkey *k;
+		struct btree_iter iter;
+
+		bch_btree_iter_init(b, &iter, from);
+
+		while ((k = bch_btree_iter_next_filter(&iter, b,
+						       bch_ptr_bad))) {
+			ret = btree(map_nodes_recurse, k, b,
+				    op, from, fn, flags);
+			from = NULL;
+
+			if (ret != MAP_CONTINUE)
+				return ret;
+		}
+	}
+
+	if (!b->level || flags == MAP_ALL_NODES)
+		ret = fn(op, b);
+
+	return ret;
+}
+
+int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
+			  struct bkey *from, btree_map_nodes_fn *fn, int flags)
+{
+	int ret = btree_root(map_nodes_recurse, c, op, from, fn, flags);
+	if (closure_blocking(&op->cl))
+		closure_sync(&op->cl);
+	return ret;
+}
+
+static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
+				      struct bkey *from, btree_map_keys_fn *fn,
+				      int flags)
+{
+	int ret = MAP_CONTINUE;
+	struct bkey *k;
+	struct btree_iter iter;
+
+	bch_btree_iter_init(b, &iter, from);
+
+	while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad))) {
+		ret = !b->level
+			? fn(op, b, k)
+			: btree(map_keys_recurse, k, b, op, from, fn, flags);
+		from = NULL;
+
+		if (ret != MAP_CONTINUE)
+			return ret;
+	}
+
+	if (!b->level && (flags & MAP_END_KEY))
+		ret = fn(op, b, &KEY(KEY_INODE(&b->key),
+				     KEY_OFFSET(&b->key), 0));
+
+	return ret;
+}
+
+int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
+		       struct bkey *from, btree_map_keys_fn *fn, int flags)
+{
+	int ret = btree_root(map_keys_recurse, c, op, from, fn, flags);
+	if (closure_blocking(&op->cl))
+		closure_sync(&op->cl);
+	return ret;
+}
+
 /* Keybuf code */
 
 static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
@@ -2314,74 +2390,70 @@ static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
 	return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
 }
 
-static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op,
-				   struct keybuf *buf, struct bkey *end,
-				   keybuf_pred_fn *pred)
-{
-	struct btree_iter iter;
-	bch_btree_iter_init(b, &iter, &buf->last_scanned);
-
-	while (!array_freelist_empty(&buf->freelist)) {
-		struct bkey *k = bch_btree_iter_next_filter(&iter, b,
-							    bch_ptr_bad);
-
-		if (!b->level) {
-			if (!k) {
-				buf->last_scanned = b->key;
-				break;
-			}
-
-			buf->last_scanned = *k;
-			if (bkey_cmp(&buf->last_scanned, end) >= 0)
-				break;
-
-			if (pred(buf, k)) {
-				struct keybuf_key *w;
-
-				spin_lock(&buf->lock);
-
-				w = array_alloc(&buf->freelist);
-
-				w->private = NULL;
-				bkey_copy(&w->key, k);
-
-				if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
-					array_free(&buf->freelist, w);
-
-				spin_unlock(&buf->lock);
-			}
-		} else {
-			if (!k)
-				break;
-
-			btree(refill_keybuf, k, b, op, buf, end, pred);
-			/*
-			 * Might get an error here, but can't really do anything
-			 * and it'll get logged elsewhere. Just read what we
-			 * can.
-			 */
-
-			if (bkey_cmp(&buf->last_scanned, end) >= 0)
-				break;
-
-			cond_resched();
-		}
-	}
-
-	return 0;
+struct refill {
+	struct btree_op op;
+	struct keybuf *buf;
+	struct bkey *end;
+	keybuf_pred_fn *pred;
+};
+
+static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
+			    struct bkey *k)
+{
+	struct refill *refill = container_of(op, struct refill, op);
+	struct keybuf *buf = refill->buf;
+	int ret = MAP_CONTINUE;
+
+	if (bkey_cmp(k, refill->end) >= 0) {
+		ret = MAP_DONE;
+		goto out;
+	}
+
+	if (!KEY_SIZE(k)) /* end key */
+		goto out;
+
+	if (refill->pred(buf, k)) {
+		struct keybuf_key *w;
+
+		spin_lock(&buf->lock);
+
+		w = array_alloc(&buf->freelist);
+		if (!w) {
+			spin_unlock(&buf->lock);
+			return MAP_DONE;
+		}
+
+		w->private = NULL;
+		bkey_copy(&w->key, k);
+
+		if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
+			array_free(&buf->freelist, w);
+
+		if (array_freelist_empty(&buf->freelist))
+			ret = MAP_DONE;
+
+		spin_unlock(&buf->lock);
+	}
+out:
+	buf->last_scanned = *k;
+	return ret;
 }
 
 void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
 		       struct bkey *end, keybuf_pred_fn *pred)
 {
 	struct bkey start = buf->last_scanned;
-	struct btree_op op;
-	bch_btree_op_init_stack(&op);
+	struct refill refill;
 
 	cond_resched();
 
-	btree_root(refill_keybuf, c, &op, buf, end, pred);
-	closure_sync(&op.cl);
+	bch_btree_op_init_stack(&refill.op);
+	refill.buf = buf;
+	refill.end = end;
+	refill.pred = pred;
+
+	bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
+			   refill_keybuf_fn, MAP_END_KEY);
 
 	pr_debug("found %s keys from %llu:%llu to %llu:%llu",
 		 RB_EMPTY_ROOT(&buf->keys) ? "no" :
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index fa9641aaed39..cafdeb01e219 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -400,9 +400,42 @@ static inline void wake_up_gc(struct cache_set *c)
 	wake_up_process(c->gc_thread);
 }
 
+#define MAP_DONE	0
+#define MAP_CONTINUE	1
+
+#define MAP_ALL_NODES	0
+#define MAP_LEAF_NODES	1
+
+#define MAP_END_KEY	1
+
+typedef int (btree_map_nodes_fn)(struct btree_op *, struct btree *);
+int __bch_btree_map_nodes(struct btree_op *, struct cache_set *,
+			  struct bkey *, btree_map_nodes_fn *, int);
+
+static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
+				      struct bkey *from, btree_map_nodes_fn *fn)
+{
+	return __bch_btree_map_nodes(op, c, from, fn, MAP_ALL_NODES);
+}
+
+static inline int bch_btree_map_leaf_nodes(struct btree_op *op,
+					   struct cache_set *c,
+					   struct bkey *from,
+					   btree_map_nodes_fn *fn)
+{
+	return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES);
+}
+
+typedef int (btree_map_keys_fn)(struct btree_op *, struct btree *,
+				struct bkey *);
+int bch_btree_map_keys(struct btree_op *, struct cache_set *,
+		       struct bkey *, btree_map_keys_fn *, int);
+
+typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
+
 void bch_keybuf_init(struct keybuf *);
-void bch_refill_keybuf(struct cache_set *, struct keybuf *, struct bkey *,
-		       keybuf_pred_fn *);
+void bch_refill_keybuf(struct cache_set *, struct keybuf *,
+		       struct bkey *, keybuf_pred_fn *);
 bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *,
 				  struct bkey *);
 void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
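As a usage note for the declarations added above: a node-level walk looks much like the key-level one, except the callback takes only the op and the btree node, and MAP_LEAF_NODES vs MAP_ALL_NODES controls whether internal nodes are visited too. The sketch below is hypothetical (leaf_count and count_leaf_fn are not part of this patch):

/* Hypothetical: count leaf nodes via bch_btree_map_leaf_nodes().
 * The embedded btree_op carries the result back to the caller. */
struct leaf_count {
	struct btree_op	op;
	size_t		leaves;
};

static int count_leaf_fn(struct btree_op *op, struct btree *b)
{
	struct leaf_count *lc = container_of(op, struct leaf_count, op);

	lc->leaves++;		/* b is a leaf when called with MAP_LEAF_NODES */
	return MAP_CONTINUE;
}

/* bch_btree_map_leaf_nodes(&lc.op, c, &ZERO_KEY, count_leaf_fn) calls the fn
 * only on leaves; bch_btree_map_nodes() (MAP_ALL_NODES) would also call it on
 * internal nodes after recursing into them. */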
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 4392f3f38d62..c68de9f12618 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -433,31 +433,17 @@ static int bch_writeback_thread(void *arg)
 
 /* Init */
 
-static int bch_btree_sectors_dirty_init(struct btree *b, struct btree_op *op,
-					struct cached_dev *dc)
+static int sectors_dirty_init_fn(struct btree_op *op, struct btree *b,
+				 struct bkey *k)
 {
-	struct bkey *k;
-	struct btree_iter iter;
+	if (KEY_INODE(k) > op->inode)
+		return MAP_DONE;
 
-	bch_btree_iter_init(b, &iter, &KEY(dc->disk.id, 0, 0));
-	while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad)))
-		if (!b->level) {
-			if (KEY_INODE(k) > dc->disk.id)
-				break;
-
-			if (KEY_DIRTY(k))
-				bcache_dev_sectors_dirty_add(b->c, dc->disk.id,
-							     KEY_START(k),
-							     KEY_SIZE(k));
-		} else {
-			btree(sectors_dirty_init, k, b, op, dc);
-			if (KEY_INODE(k) > dc->disk.id)
-				break;
-
-			cond_resched();
-		}
-
-	return 0;
+	if (KEY_DIRTY(k))
+		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
+					     KEY_START(k), KEY_SIZE(k));
+
+	return MAP_CONTINUE;
 }
 
 void bch_sectors_dirty_init(struct cached_dev *dc)
@@ -465,7 +451,10 @@ void bch_sectors_dirty_init(struct cached_dev *dc)
 	struct btree_op op;
 
 	bch_btree_op_init_stack(&op);
-	btree_root(sectors_dirty_init, dc->disk.c, &op, dc);
+	op.inode = dc->disk.id;
+
+	bch_btree_map_keys(&op, dc->disk.c, &KEY(op.inode, 0, 0),
+			   sectors_dirty_init_fn, 0);
 }
 
 int bch_cached_dev_writeback_init(struct cached_dev *dc)