about summary refs log tree commit diff stats
path: root/fs/btrfs/extent_io.c
diff options
context:
space:
mode:
author: Chris Mason <chris.mason@oracle.com> 2008-03-26 16:24:23 -0400
committer: Chris Mason <chris.mason@oracle.com> 2008-09-25 11:04:01 -0400
commit: 2d2ae547979854d10b75d557b3abdb3eb7511bbc (patch)
tree: 9a3bab510a9fadb2b8be27497041675ad0108938 /fs/btrfs/extent_io.c
parent: 83041add611056e830e29fda913029e37e857239 (diff)
Btrfs: Add leak debugging for extent_buffer and extent_state
This also fixes one leak around the super block when failing to mount the FS.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--  fs/btrfs/extent_io.c  28
1 file changed, 26 insertions, 2 deletions
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 6dab664529c1..b99cfabd008e 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -26,6 +26,7 @@ static struct kmem_cache *extent_buffer_cache;
26 26
27static LIST_HEAD(buffers); 27static LIST_HEAD(buffers);
28static LIST_HEAD(states); 28static LIST_HEAD(states);
29static spinlock_t leak_lock = SPIN_LOCK_UNLOCKED;
29 30
30#define BUFFER_LRU_MAX 64 31#define BUFFER_LRU_MAX 64
31 32
@@ -64,15 +65,22 @@ free_state_cache:
64void extent_io_exit(void) 65void extent_io_exit(void)
65{ 66{
66 struct extent_state *state; 67 struct extent_state *state;
68 struct extent_buffer *eb;
67 69
68 while (!list_empty(&states)) { 70 while (!list_empty(&states)) {
69 state = list_entry(states.next, struct extent_state, list); 71 state = list_entry(states.next, struct extent_state, leak_list);
70 printk("state leak: start %Lu end %Lu state %lu in tree %p refs %d\n", state->start, state->end, state->state, state->tree, atomic_read(&state->refs)); 72 printk("state leak: start %Lu end %Lu state %lu in tree %p refs %d\n", state->start, state->end, state->state, state->tree, atomic_read(&state->refs));
71 list_del(&state->list); 73 list_del(&state->leak_list);
72 kmem_cache_free(extent_state_cache, state); 74 kmem_cache_free(extent_state_cache, state);
73 75
74 } 76 }
75 77
78 while (!list_empty(&buffers)) {
79 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
80 printk("buffer leak start %Lu len %lu refs %d\n", eb->start, eb->len, atomic_read(&eb->refs));
81 list_del(&eb->leak_list);
82 kmem_cache_free(extent_buffer_cache, eb);
83 }
76 if (extent_state_cache) 84 if (extent_state_cache)
77 kmem_cache_destroy(extent_state_cache); 85 kmem_cache_destroy(extent_state_cache);
78 if (extent_buffer_cache) 86 if (extent_buffer_cache)
@@ -109,6 +117,7 @@ EXPORT_SYMBOL(extent_io_tree_empty_lru);
109struct extent_state *alloc_extent_state(gfp_t mask) 117struct extent_state *alloc_extent_state(gfp_t mask)
110{ 118{
111 struct extent_state *state; 119 struct extent_state *state;
120 unsigned long flags;
112 121
113 state = kmem_cache_alloc(extent_state_cache, mask); 122 state = kmem_cache_alloc(extent_state_cache, mask);
114 if (!state || IS_ERR(state)) 123 if (!state || IS_ERR(state))
@@ -116,6 +125,9 @@ struct extent_state *alloc_extent_state(gfp_t mask)
116 state->state = 0; 125 state->state = 0;
117 state->private = 0; 126 state->private = 0;
118 state->tree = NULL; 127 state->tree = NULL;
128 spin_lock_irqsave(&leak_lock, flags);
129 list_add(&state->leak_list, &states);
130 spin_unlock_irqrestore(&leak_lock, flags);
119 131
120 atomic_set(&state->refs, 1); 132 atomic_set(&state->refs, 1);
121 init_waitqueue_head(&state->wq); 133 init_waitqueue_head(&state->wq);
@@ -128,7 +140,11 @@ void free_extent_state(struct extent_state *state)
128 if (!state) 140 if (!state)
129 return; 141 return;
130 if (atomic_dec_and_test(&state->refs)) { 142 if (atomic_dec_and_test(&state->refs)) {
143 unsigned long flags;
131 WARN_ON(state->tree); 144 WARN_ON(state->tree);
145 spin_lock_irqsave(&leak_lock, flags);
146 list_del(&state->leak_list);
147 spin_unlock_irqrestore(&leak_lock, flags);
132 kmem_cache_free(extent_state_cache, state); 148 kmem_cache_free(extent_state_cache, state);
133 } 149 }
134} 150}
@@ -2582,6 +2598,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2582 gfp_t mask) 2598 gfp_t mask)
2583{ 2599{
2584 struct extent_buffer *eb = NULL; 2600 struct extent_buffer *eb = NULL;
2601 unsigned long flags;
2585 2602
2586 spin_lock(&tree->lru_lock); 2603 spin_lock(&tree->lru_lock);
2587 eb = find_lru(tree, start, len); 2604 eb = find_lru(tree, start, len);
@@ -2594,6 +2611,9 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2594 INIT_LIST_HEAD(&eb->lru); 2611 INIT_LIST_HEAD(&eb->lru);
2595 eb->start = start; 2612 eb->start = start;
2596 eb->len = len; 2613 eb->len = len;
2614 spin_lock_irqsave(&leak_lock, flags);
2615 list_add(&eb->leak_list, &buffers);
2616 spin_unlock_irqrestore(&leak_lock, flags);
2597 atomic_set(&eb->refs, 1); 2617 atomic_set(&eb->refs, 1);
2598 2618
2599 return eb; 2619 return eb;
@@ -2601,6 +2621,10 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2601 2621
2602static void __free_extent_buffer(struct extent_buffer *eb) 2622static void __free_extent_buffer(struct extent_buffer *eb)
2603{ 2623{
2624 unsigned long flags;
2625 spin_lock_irqsave(&leak_lock, flags);
2626 list_del(&eb->leak_list);
2627 spin_unlock_irqrestore(&leak_lock, flags);
2604 kmem_cache_free(extent_buffer_cache, eb); 2628 kmem_cache_free(extent_buffer_cache, eb);
2605} 2629}
2606 2630