about summary refs log tree commit diff stats
path: root/include
diff options
context:
space:
mode:
authorKent Overstreet <kmo@daterainc.com>2013-12-17 04:29:34 -0500
committerKent Overstreet <kmo@daterainc.com>2014-01-08 16:05:09 -0500
commit78365411b344df35a198b119133e6515c2dcfb9f (patch)
treee94c2e1bd0d5dc53e6a938b012e9b20d3a511eca /include
parent1dd13c8d3c2d82e1b668d0b4754591291656542a (diff)
bcache: Rework allocator reserves
We need a reserve for allocating buckets for new btree nodes - and now that we've got multiple btrees, it really needs to be per btree. This reworks the reserves so we've got separate freelists for each reserve instead of watermarks, which seems to make things a bit cleaner, and it adds some code so that btree_split() can make sure the reserve is available before it starts. Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Diffstat (limited to 'include')
-rw-r--r--  include/trace/events/bcache.h | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index 095c6e4fe1e8..0c5cf2f63dc3 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -411,7 +411,7 @@ TRACE_EVENT(bcache_alloc_invalidate,
 	),
 
 	TP_fast_assign(
-		__entry->free		= fifo_used(&ca->free);
+		__entry->free		= fifo_used(&ca->free[RESERVE_NONE]);
 		__entry->free_inc	= fifo_used(&ca->free_inc);
 		__entry->free_inc_size	= ca->free_inc.size;
 		__entry->unused		= fifo_used(&ca->unused);
@@ -422,8 +422,8 @@ TRACE_EVENT(bcache_alloc_invalidate,
 );
 
 TRACE_EVENT(bcache_alloc_fail,
-	TP_PROTO(struct cache *ca),
-	TP_ARGS(ca),
+	TP_PROTO(struct cache *ca, unsigned reserve),
+	TP_ARGS(ca, reserve),
 
 	TP_STRUCT__entry(
 		__field(unsigned,	free	)
@@ -433,7 +433,7 @@ TRACE_EVENT(bcache_alloc_fail,
 	),
 
 	TP_fast_assign(
-		__entry->free		= fifo_used(&ca->free);
+		__entry->free		= fifo_used(&ca->free[reserve]);
 		__entry->free_inc	= fifo_used(&ca->free_inc);
 		__entry->unused		= fifo_used(&ca->unused);
 		__entry->blocked	= atomic_read(&ca->set->prio_blocked);