about summary refs log tree commit diff stats
path: root/fs/btrfs/extent_io.c
diff options
context:
space:
mode:
authorFilipe Manana <fdmanana@suse.com>2014-07-06 15:09:59 -0400
committerChris Mason <clm@fb.com>2014-09-17 16:37:30 -0400
commit27a3507de91e3dd51a2cf8dca4b33623dd6eaa88 (patch)
treeef0a15fb445a2006a305cc9d61c918eb60d6c034 /fs/btrfs/extent_io.c
parent6f84e23646704c93fa878c5b87a4990be8d1ca9c (diff)
Btrfs: reduce size of struct extent_state
The tree field of struct extent_state was only used to figure out if an extent state was connected to an inode's io tree or not. For this we can just use the rb_node field itself. On a x86_64 system with this change the sizeof(struct extent_state) is reduced from 96 bytes down to 88 bytes, meaning that with a page size of 4096 bytes we can now store 46 extent states per page instead of 42. Signed-off-by: Filipe Manana <fdmanana@suse.com> Signed-off-by: Chris Mason <clm@fb.com>
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--fs/btrfs/extent_io.c37
1 file changed, 20 insertions(+), 17 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 1009fa8a08ef..816e80e678bd 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -25,6 +25,11 @@ static struct kmem_cache *extent_state_cache;
25static struct kmem_cache *extent_buffer_cache; 25static struct kmem_cache *extent_buffer_cache;
26static struct bio_set *btrfs_bioset; 26static struct bio_set *btrfs_bioset;
27 27
28static inline bool extent_state_in_tree(const struct extent_state *state)
29{
30 return !RB_EMPTY_NODE(&state->rb_node);
31}
32
28#ifdef CONFIG_BTRFS_DEBUG 33#ifdef CONFIG_BTRFS_DEBUG
29static LIST_HEAD(buffers); 34static LIST_HEAD(buffers);
30static LIST_HEAD(states); 35static LIST_HEAD(states);
@@ -59,9 +64,9 @@ void btrfs_leak_debug_check(void)
59 64
60 while (!list_empty(&states)) { 65 while (!list_empty(&states)) {
61 state = list_entry(states.next, struct extent_state, leak_list); 66 state = list_entry(states.next, struct extent_state, leak_list);
62 printk(KERN_ERR "BTRFS: state leak: start %llu end %llu " 67 pr_err("BTRFS: state leak: start %llu end %llu state %lu in tree %d refs %d\n",
63 "state %lu in tree %p refs %d\n", 68 state->start, state->end, state->state,
64 state->start, state->end, state->state, state->tree, 69 extent_state_in_tree(state),
65 atomic_read(&state->refs)); 70 atomic_read(&state->refs));
66 list_del(&state->leak_list); 71 list_del(&state->leak_list);
67 kmem_cache_free(extent_state_cache, state); 72 kmem_cache_free(extent_state_cache, state);
@@ -209,7 +214,7 @@ static struct extent_state *alloc_extent_state(gfp_t mask)
209 return state; 214 return state;
210 state->state = 0; 215 state->state = 0;
211 state->private = 0; 216 state->private = 0;
212 state->tree = NULL; 217 RB_CLEAR_NODE(&state->rb_node);
213 btrfs_leak_debug_add(&state->leak_list, &states); 218 btrfs_leak_debug_add(&state->leak_list, &states);
214 atomic_set(&state->refs, 1); 219 atomic_set(&state->refs, 1);
215 init_waitqueue_head(&state->wq); 220 init_waitqueue_head(&state->wq);
@@ -222,7 +227,7 @@ void free_extent_state(struct extent_state *state)
222 if (!state) 227 if (!state)
223 return; 228 return;
224 if (atomic_dec_and_test(&state->refs)) { 229 if (atomic_dec_and_test(&state->refs)) {
225 WARN_ON(state->tree); 230 WARN_ON(extent_state_in_tree(state));
226 btrfs_leak_debug_del(&state->leak_list); 231 btrfs_leak_debug_del(&state->leak_list);
227 trace_free_extent_state(state, _RET_IP_); 232 trace_free_extent_state(state, _RET_IP_);
228 kmem_cache_free(extent_state_cache, state); 233 kmem_cache_free(extent_state_cache, state);
@@ -371,8 +376,8 @@ static void merge_state(struct extent_io_tree *tree,
371 other->state == state->state) { 376 other->state == state->state) {
372 merge_cb(tree, state, other); 377 merge_cb(tree, state, other);
373 state->start = other->start; 378 state->start = other->start;
374 other->tree = NULL;
375 rb_erase(&other->rb_node, &tree->state); 379 rb_erase(&other->rb_node, &tree->state);
380 RB_CLEAR_NODE(&other->rb_node);
376 free_extent_state(other); 381 free_extent_state(other);
377 } 382 }
378 } 383 }
@@ -383,8 +388,8 @@ static void merge_state(struct extent_io_tree *tree,
383 other->state == state->state) { 388 other->state == state->state) {
384 merge_cb(tree, state, other); 389 merge_cb(tree, state, other);
385 state->end = other->end; 390 state->end = other->end;
386 other->tree = NULL;
387 rb_erase(&other->rb_node, &tree->state); 391 rb_erase(&other->rb_node, &tree->state);
392 RB_CLEAR_NODE(&other->rb_node);
388 free_extent_state(other); 393 free_extent_state(other);
389 } 394 }
390 } 395 }
@@ -442,7 +447,6 @@ static int insert_state(struct extent_io_tree *tree,
442 found->start, found->end, start, end); 447 found->start, found->end, start, end);
443 return -EEXIST; 448 return -EEXIST;
444 } 449 }
445 state->tree = tree;
446 merge_state(tree, state); 450 merge_state(tree, state);
447 return 0; 451 return 0;
448} 452}
@@ -486,7 +490,6 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
486 free_extent_state(prealloc); 490 free_extent_state(prealloc);
487 return -EEXIST; 491 return -EEXIST;
488 } 492 }
489 prealloc->tree = tree;
490 return 0; 493 return 0;
491} 494}
492 495
@@ -524,9 +527,9 @@ static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
524 wake_up(&state->wq); 527 wake_up(&state->wq);
525 if (state->state == 0) { 528 if (state->state == 0) {
526 next = next_state(state); 529 next = next_state(state);
527 if (state->tree) { 530 if (extent_state_in_tree(state)) {
528 rb_erase(&state->rb_node, &tree->state); 531 rb_erase(&state->rb_node, &tree->state);
529 state->tree = NULL; 532 RB_CLEAR_NODE(&state->rb_node);
530 free_extent_state(state); 533 free_extent_state(state);
531 } else { 534 } else {
532 WARN_ON(1); 535 WARN_ON(1);
@@ -606,8 +609,8 @@ again:
606 cached_state = NULL; 609 cached_state = NULL;
607 } 610 }
608 611
609 if (cached && cached->tree && cached->start <= start && 612 if (cached && extent_state_in_tree(cached) &&
610 cached->end > start) { 613 cached->start <= start && cached->end > start) {
611 if (clear) 614 if (clear)
612 atomic_dec(&cached->refs); 615 atomic_dec(&cached->refs);
613 state = cached; 616 state = cached;
@@ -843,7 +846,7 @@ again:
843 if (cached_state && *cached_state) { 846 if (cached_state && *cached_state) {
844 state = *cached_state; 847 state = *cached_state;
845 if (state->start <= start && state->end > start && 848 if (state->start <= start && state->end > start &&
846 state->tree) { 849 extent_state_in_tree(state)) {
847 node = &state->rb_node; 850 node = &state->rb_node;
848 goto hit_next; 851 goto hit_next;
849 } 852 }
@@ -1069,7 +1072,7 @@ again:
1069 if (cached_state && *cached_state) { 1072 if (cached_state && *cached_state) {
1070 state = *cached_state; 1073 state = *cached_state;
1071 if (state->start <= start && state->end > start && 1074 if (state->start <= start && state->end > start &&
1072 state->tree) { 1075 extent_state_in_tree(state)) {
1073 node = &state->rb_node; 1076 node = &state->rb_node;
1074 goto hit_next; 1077 goto hit_next;
1075 } 1078 }
@@ -1459,7 +1462,7 @@ int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1459 spin_lock(&tree->lock); 1462 spin_lock(&tree->lock);
1460 if (cached_state && *cached_state) { 1463 if (cached_state && *cached_state) {
1461 state = *cached_state; 1464 state = *cached_state;
1462 if (state->end == start - 1 && state->tree) { 1465 if (state->end == start - 1 && extent_state_in_tree(state)) {
1463 n = rb_next(&state->rb_node); 1466 n = rb_next(&state->rb_node);
1464 while (n) { 1467 while (n) {
1465 state = rb_entry(n, struct extent_state, 1468 state = rb_entry(n, struct extent_state,
@@ -1905,7 +1908,7 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1905 int bitset = 0; 1908 int bitset = 0;
1906 1909
1907 spin_lock(&tree->lock); 1910 spin_lock(&tree->lock);
1908 if (cached && cached->tree && cached->start <= start && 1911 if (cached && extent_state_in_tree(cached) && cached->start <= start &&
1909 cached->end > start) 1912 cached->end > start)
1910 node = &cached->rb_node; 1913 node = &cached->rb_node;
1911 else 1914 else