path: root/drivers/md/bcache/btree.c
author     Kent Overstreet <koverstreet@google.com>    2013-04-26 18:39:55 -0400
committer  Kent Overstreet <koverstreet@google.com>    2013-06-26 20:09:15 -0400
commit     c37511b863f36c1cc6e18440717fd4cc0e881b8a (patch)
tree       64d82c648bd092f38c35c4b808411bc1cdb3a9f0 /drivers/md/bcache/btree.c
parent     5794351146199b9ac67a5ab1beab82be8bfd7b5d (diff)
bcache: Fix/revamp tracepoints
The tracepoints were reworked to be more sensible, and a null pointer deref in one of the tracepoints was fixed.

Converted some of the pr_debug()s to tracepoints - this is partly a performance optimization; it used to be that without DEBUG or CONFIG_DYNAMIC_DEBUG, pr_debug() was an empty macro, but at some point it was changed to an empty inline function. Some of the pr_debug() statements had rather expensive function calls as part of their arguments, so this code was getting run unnecessarily even on non-debug kernels - in some fast paths, too.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
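For illustration, a minimal, self-contained C sketch of the cost being avoided; the names (expensive_describe(), debug_print(), trace_node_read(), trace_enabled) are hypothetical stand-ins, not the kernel's actual pr_debug() or tracepoint machinery. An empty inline debug function still forces its argument expressions to be evaluated at every call site, whereas checking a cheap flag before doing any formatting keeps the expensive work off the fast path:

#include <stdio.h>

/* Stand-in for an expensive helper such as pbtree(): formats a description. */
static const char *expensive_describe(int id)
{
	static char buf[64];
	snprintf(buf, sizeof(buf), "node %d", id);	/* the real work happens here */
	return buf;
}

/* Empty inline "debug" function: the body vanishes, but the argument
 * expression at the call site is still evaluated. */
static inline void debug_print(const char *msg) { (void)msg; }

/* Cheap guard checked before any formatting - roughly the shape of a
 * disabled-by-default tracepoint. */
static int trace_enabled;

static void trace_node_read(int id)
{
	if (!trace_enabled)
		return;				/* fast path: no formatting at all */
	printf("node_read: %s\n", expensive_describe(id));
}

int main(void)
{
	debug_print(expensive_describe(42));	/* expensive_describe() still runs */
	trace_node_read(42);			/* cheap: only an integer is passed */
	return 0;
}

In the patch itself the new tracepoints take the btree node or cache_set pointer directly (trace_bcache_btree_read(b), trace_bcache_gc_start(c)), so string formatting such as pbtree() no longer happens on the fast path.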
Diffstat (limited to 'drivers/md/bcache/btree.c')
-rw-r--r--  drivers/md/bcache/btree.c  47
1 file changed, 24 insertions(+), 23 deletions(-)
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index aaec186f7ba6..218d486259a3 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -223,8 +223,9 @@ void bch_btree_node_read(struct btree *b)
 	struct closure cl;
 	struct bio *bio;
 
+	trace_bcache_btree_read(b);
+
 	closure_init_stack(&cl);
-	pr_debug("%s", pbtree(b));
 
 	bio = bch_bbio_alloc(b->c);
 	bio->bi_rw = REQ_META|READ_SYNC;
@@ -234,7 +235,6 @@ void bch_btree_node_read(struct btree *b)
 
 	bch_bio_map(bio, b->sets[0].data);
 
-	trace_bcache_btree_read(bio);
 	bch_submit_bbio(bio, b->c, &b->key, 0);
 	closure_sync(&cl);
 
@@ -343,7 +343,6 @@ static void do_btree_node_write(struct btree *b)
 			memcpy(page_address(bv->bv_page),
 			       base + j * PAGE_SIZE, PAGE_SIZE);
 
-		trace_bcache_btree_write(b->bio);
 		bch_submit_bbio(b->bio, b->c, &k.key, 0);
 
 		continue_at(cl, btree_node_write_done, NULL);
@@ -351,7 +350,6 @@ static void do_btree_node_write(struct btree *b)
 		b->bio->bi_vcnt = 0;
 		bch_bio_map(b->bio, i);
 
-		trace_bcache_btree_write(b->bio);
 		bch_submit_bbio(b->bio, b->c, &k.key, 0);
 
 		closure_sync(cl);
@@ -363,10 +361,13 @@ void bch_btree_node_write(struct btree *b, struct closure *parent)
 {
 	struct bset *i = b->sets[b->nsets].data;
 
+	trace_bcache_btree_write(b);
+
 	BUG_ON(current->bio_list);
 	BUG_ON(b->written >= btree_blocks(b));
 	BUG_ON(b->written && !i->keys);
 	BUG_ON(b->sets->data->seq != i->seq);
+	bch_check_key_order(b, i);
 
 	cancel_delayed_work(&b->work);
 
@@ -376,12 +377,8 @@ void bch_btree_node_write(struct btree *b, struct closure *parent)
 	clear_bit(BTREE_NODE_dirty, &b->flags);
 	change_bit(BTREE_NODE_write_idx, &b->flags);
 
-	bch_check_key_order(b, i);
-
 	do_btree_node_write(b);
 
-	pr_debug("%s block %i keys %i", pbtree(b), b->written, i->keys);
-
 	b->written += set_blocks(i, b->c);
 	atomic_long_add(set_blocks(i, b->c) * b->c->sb.block_size,
 			&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
@@ -752,6 +749,8 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k,
 	int ret = -ENOMEM;
 	struct btree *i;
 
+	trace_bcache_btree_cache_cannibalize(c);
+
 	if (!cl)
 		return ERR_PTR(-ENOMEM);
 
@@ -770,7 +769,6 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k,
 		return ERR_PTR(-EAGAIN);
 	}
 
-	/* XXX: tracepoint */
 	c->try_harder = cl;
 	c->try_harder_start = local_clock();
 retry:
@@ -956,13 +954,14 @@ static void btree_node_free(struct btree *b, struct btree_op *op)
 {
 	unsigned i;
 
+	trace_bcache_btree_node_free(b);
+
 	/*
 	 * The BUG_ON() in btree_node_get() implies that we must have a write
 	 * lock on parent to free or even invalidate a node
 	 */
 	BUG_ON(op->lock <= b->level);
 	BUG_ON(b == b->c->root);
-	pr_debug("bucket %s", pbtree(b));
 
 	if (btree_node_dirty(b))
 		btree_complete_write(b, btree_current_write(b));
@@ -1012,12 +1011,16 @@ retry:
 	bch_bset_init_next(b);
 
 	mutex_unlock(&c->bucket_lock);
+
+	trace_bcache_btree_node_alloc(b);
 	return b;
 err_free:
 	bch_bucket_free(c, &k.key);
 	__bkey_put(c, &k.key);
 err:
 	mutex_unlock(&c->bucket_lock);
+
+	trace_bcache_btree_node_alloc_fail(b);
 	return b;
 }
 
@@ -1254,7 +1257,7 @@ static void btree_gc_coalesce(struct btree *b, struct btree_op *op,
 	btree_node_free(r->b, op);
 	up_write(&r->b->lock);
 
-	pr_debug("coalesced %u nodes", nodes);
+	trace_bcache_btree_gc_coalesce(nodes);
 
 	gc->nodes--;
 	nodes--;
@@ -1479,8 +1482,7 @@ static void bch_btree_gc(struct closure *cl)
 	struct btree_op op;
 	uint64_t start_time = local_clock();
 
-	trace_bcache_gc_start(c->sb.set_uuid);
-	blktrace_msg_all(c, "Starting gc");
+	trace_bcache_gc_start(c);
 
 	memset(&stats, 0, sizeof(struct gc_stat));
 	closure_init_stack(&writes);
@@ -1496,9 +1498,7 @@ static void bch_btree_gc(struct closure *cl)
 	closure_sync(&writes);
 
 	if (ret) {
-		blktrace_msg_all(c, "Stopped gc");
 		pr_warn("gc failed!");
-
 		continue_at(cl, bch_btree_gc, bch_gc_wq);
 	}
 
@@ -1519,8 +1519,7 @@ static void bch_btree_gc(struct closure *cl)
 	stats.in_use = (c->nbuckets - available) * 100 / c->nbuckets;
 	memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
 
-	blktrace_msg_all(c, "Finished gc");
-	trace_bcache_gc_end(c->sb.set_uuid);
+	trace_bcache_gc_end(c);
 
 	continue_at(cl, bch_moving_gc, bch_gc_wq);
 }
@@ -1901,12 +1900,11 @@ static int btree_split(struct btree *b, struct btree_op *op)
 
 	split = set_blocks(n1->sets[0].data, n1->c) > (btree_blocks(b) * 4) / 5;
 
-	pr_debug("%ssplitting at %s keys %i", split ? "" : "not ",
-		 pbtree(b), n1->sets[0].data->keys);
-
 	if (split) {
 		unsigned keys = 0;
 
+		trace_bcache_btree_node_split(b, n1->sets[0].data->keys);
+
 		n2 = bch_btree_node_alloc(b->c, b->level, &op->cl);
 		if (IS_ERR(n2))
 			goto err_free1;
@@ -1941,8 +1939,11 @@ static int btree_split(struct btree *b, struct btree_op *op)
 		bch_keylist_add(&op->keys, &n2->key);
 		bch_btree_node_write(n2, &op->cl);
 		rw_unlock(true, n2);
-	} else
+	} else {
+		trace_bcache_btree_node_compact(b, n1->sets[0].data->keys);
+
 		bch_btree_insert_keys(n1, op);
+	}
 
 	bch_keylist_add(&op->keys, &n1->key);
 	bch_btree_node_write(n1, &op->cl);
@@ -2117,6 +2118,8 @@ void bch_btree_set_root(struct btree *b)
 {
 	unsigned i;
 
+	trace_bcache_btree_set_root(b);
+
 	BUG_ON(!b->written);
 
 	for (i = 0; i < KEY_PTRS(&b->key); i++)
@@ -2130,7 +2133,6 @@ void bch_btree_set_root(struct btree *b)
 	__bkey_put(b->c, &b->key);
 
 	bch_journal_meta(b->c, NULL);
-	pr_debug("%s for %pf", pbtree(b), __builtin_return_address(0));
 }
 
 /* Cache lookup */
@@ -2216,7 +2218,6 @@ static int submit_partial_cache_hit(struct btree *b, struct btree_op *op,
 		n->bi_end_io = bch_cache_read_endio;
 		n->bi_private = &s->cl;
 
-		trace_bcache_cache_hit(n);
 		__bch_submit_bbio(n, b->c);
 	}
 