author     Kent Overstreet <kmo@daterainc.com>     2014-02-12 21:43:32 -0500
committer  Kent Overstreet <kmo@daterainc.com>     2014-03-18 15:22:35 -0400
commit     7159b1ad3dded9da040b5c608acf3d52d50f661e
tree       3ec196333d8ae22e359dc7d16fe8d48b8352fbea
parent     3f5e0a34daed197aa55d0c6b466bb4cd03babb4f
bcache: Better alloc tracepoints
Change the invalidate tracepoint to indicate how much data we're
invalidating, and change the alloc tracepoints to indicate what offset
they're for.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
-rw-r--r--  drivers/md/bcache/alloc.c      | 15
-rw-r--r--  drivers/md/bcache/trace.c      |  2
-rw-r--r--  include/trace/events/bcache.h  | 48
3 files changed, 46 insertions(+), 19 deletions(-)
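
The "sector=" values printed by the new tracepoints come from TP_fast_assign
shifting the bucket index by the cache set's bucket_bits, i.e. they are the
bucket's starting sector. A minimal userspace sketch of that arithmetic
(bucket_to_sector and the 1024-sector bucket size are illustrative, not part
of the patch):

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: mirrors __entry->offset = bucket << ca->set->bucket_bits
 * in the tracepoints below, assuming bucket_bits is log2 of the bucket size
 * in 512-byte sectors.
 */
static uint64_t bucket_to_sector(uint64_t bucket, unsigned bucket_bits)
{
        return bucket << bucket_bits;
}

int main(void)
{
        /* Hypothetical cache with 1024-sector (512 KiB) buckets. */
        unsigned bucket_bits = 10;

        printf("bucket 42 starts at sector %llu\n",
               (unsigned long long)bucket_to_sector(42, bucket_bits));
        return 0;
}
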
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index c0d37d082443..a3e1427945f2 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -162,10 +162,15 @@ static bool can_invalidate_bucket(struct cache *ca, struct bucket *b)
 
 static void invalidate_one_bucket(struct cache *ca, struct bucket *b)
 {
+        size_t bucket = b - ca->buckets;
+
+        if (GC_SECTORS_USED(b))
+                trace_bcache_invalidate(ca, bucket);
+
         bch_inc_gen(ca, b);
         b->prio = INITIAL_PRIO;
         atomic_inc(&b->pin);
-        fifo_push(&ca->free_inc, b - ca->buckets);
+        fifo_push(&ca->free_inc, bucket);
 }
 
 /*
@@ -301,8 +306,6 @@ static void invalidate_buckets(struct cache *ca)
                 invalidate_buckets_random(ca);
                 break;
         }
-
-        trace_bcache_alloc_invalidate(ca);
 }
 
 #define allocator_wait(ca, cond)                                        \
@@ -408,8 +411,10 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
             fifo_pop(&ca->free[reserve], r))
                 goto out;
 
-        if (!wait)
+        if (!wait) {
+                trace_bcache_alloc_fail(ca, reserve);
                 return -1;
+        }
 
         do {
                 prepare_to_wait(&ca->set->bucket_wait, &w,
@@ -425,6 +430,8 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
 out:
         wake_up_process(ca->alloc_thread);
 
+        trace_bcache_alloc(ca, reserve);
+
         if (expensive_debug_checks(ca->set)) {
                 size_t iter;
                 long i;
diff --git a/drivers/md/bcache/trace.c b/drivers/md/bcache/trace.c
index adbc3df17a80..b7820b0d2621 100644
--- a/drivers/md/bcache/trace.c
+++ b/drivers/md/bcache/trace.c
@@ -45,7 +45,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_split);
 EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_compact);
 EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_set_root);
 
-EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_alloc_invalidate);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_invalidate);
 EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_alloc_fail);
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_writeback);
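
The EXPORT_TRACEPOINT_SYMBOL_GPL line above is what lets GPL modules attach
their own probes to the renamed event. A hypothetical, untested sketch of such
a probe (module and function names and the message are made up; the probe's
first argument is the registered data pointer, the rest mirror the event's
TP_PROTO):

#include <linux/module.h>
#include <linux/tracepoint.h>

struct cache;                   /* only passed around by pointer here */
#include <trace/events/bcache.h>

/* Probe signature: void *data followed by the tracepoint's TP_PROTO args. */
static void probe_invalidate(void *data, struct cache *ca, size_t bucket)
{
        pr_debug("bcache invalidated bucket %zu\n", bucket);
}

static int __init invalidate_probe_init(void)
{
        return register_trace_bcache_invalidate(probe_invalidate, NULL);
}

static void __exit invalidate_probe_exit(void)
{
        unregister_trace_bcache_invalidate(probe_invalidate, NULL);
        tracepoint_synchronize_unregister();
}

module_init(invalidate_probe_init);
module_exit(invalidate_probe_exit);
MODULE_LICENSE("GPL");
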
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index 7110897c3dfa..8fc2a7134d3c 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -399,26 +399,43 @@ TRACE_EVENT(bcache_keyscan,
 
 /* Allocator */
 
-TRACE_EVENT(bcache_alloc_invalidate,
-        TP_PROTO(struct cache *ca),
-        TP_ARGS(ca),
+TRACE_EVENT(bcache_invalidate,
+        TP_PROTO(struct cache *ca, size_t bucket),
+        TP_ARGS(ca, bucket),
 
         TP_STRUCT__entry(
-                __field(unsigned,       free                    )
-                __field(unsigned,       free_inc                )
-                __field(unsigned,       free_inc_size           )
-                __field(unsigned,       unused                  )
+                __field(unsigned,       sectors                 )
+                __field(dev_t,          dev                     )
+                __field(__u64,          offset                  )
         ),
 
         TP_fast_assign(
-                __entry->free           = fifo_used(&ca->free[RESERVE_NONE]);
-                __entry->free_inc       = fifo_used(&ca->free_inc);
-                __entry->free_inc_size  = ca->free_inc.size;
-                __entry->unused         = fifo_used(&ca->unused);
+                __entry->dev            = ca->bdev->bd_dev;
+                __entry->offset         = bucket << ca->set->bucket_bits;
+                __entry->sectors        = GC_SECTORS_USED(&ca->buckets[bucket]);
         ),
 
-        TP_printk("free %u free_inc %u/%u unused %u", __entry->free,
-                  __entry->free_inc, __entry->free_inc_size, __entry->unused)
+        TP_printk("invalidated %u sectors at %d,%d sector=%llu",
+                  __entry->sectors, MAJOR(__entry->dev),
+                  MINOR(__entry->dev), __entry->offset)
+);
+
+TRACE_EVENT(bcache_alloc,
+        TP_PROTO(struct cache *ca, size_t bucket),
+        TP_ARGS(ca, bucket),
+
+        TP_STRUCT__entry(
+                __field(dev_t,          dev                     )
+                __field(__u64,          offset                  )
+        ),
+
+        TP_fast_assign(
+                __entry->dev            = ca->bdev->bd_dev;
+                __entry->offset         = bucket << ca->set->bucket_bits;
+        ),
+
+        TP_printk("allocated %d,%d sector=%llu", MAJOR(__entry->dev),
+                  MINOR(__entry->dev), __entry->offset)
 );
 
 TRACE_EVENT(bcache_alloc_fail,
@@ -426,6 +443,7 @@ TRACE_EVENT(bcache_alloc_fail,
         TP_ARGS(ca, reserve),
 
         TP_STRUCT__entry(
+                __field(dev_t,          dev                     )
                 __field(unsigned,       free                    )
                 __field(unsigned,       free_inc                )
                 __field(unsigned,       unused                  )
@@ -433,13 +451,15 @@ TRACE_EVENT(bcache_alloc_fail,
         ),
 
         TP_fast_assign(
+                __entry->dev            = ca->bdev->bd_dev;
                 __entry->free           = fifo_used(&ca->free[reserve]);
                 __entry->free_inc       = fifo_used(&ca->free_inc);
                 __entry->unused         = fifo_used(&ca->unused);
                 __entry->blocked        = atomic_read(&ca->set->prio_blocked);
         ),
 
-        TP_printk("free %u free_inc %u unused %u blocked %u", __entry->free,
+        TP_printk("alloc fail %d,%d free %u free_inc %u unused %u blocked %u",
+                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->free,
                   __entry->free_inc, __entry->unused, __entry->blocked)
 );
 
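
With the patch applied, the allocator events print lines following the
TP_printk formats above. A small hedged userspace sketch that pulls the fields
back out of a bcache_alloc line (the sample line and helper name are invented
for illustration):

#include <stdio.h>

/* Matches the "allocated %d,%d sector=%llu" format from TP_printk above. */
static int parse_bcache_alloc(const char *body, int *major, int *minor,
                              unsigned long long *sector)
{
        return sscanf(body, "allocated %d,%d sector=%llu",
                      major, minor, sector) == 3;
}

int main(void)
{
        int major, minor;
        unsigned long long sector;

        /* Invented example line; real values come from the trace buffer. */
        if (parse_bcache_alloc("allocated 8,16 sector=12345",
                               &major, &minor, &sector))
                printf("dev %d:%d sector %llu\n", major, minor, sector);
        return 0;
}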