author    Eric Sandeen <sandeen@redhat.com>  2013-04-22 12:12:31 -0400
committer Josef Bacik <jbacik@fusionio.com>  2013-05-06 15:55:16 -0400
commit    6d49ba1b47b9c6822d08f90af6f1a2d8ca1cf533 (patch)
tree      316890f9d5ce407a6767b279620ee3131b6c8e75 /fs/btrfs/extent_io.c
parent    ace68bac61b338e52924d87ebdd0fa8c7439f896 (diff)
btrfs: move leak debug code to functions
Clean up the leak debugging in extent_io.c by moving the debug code into
functions.  This also removes the list_heads used for debugging from the
extent_buffer and extent_state structures when debug is not enabled.  Since
we need a global debug config to do that last part, implement
CONFIG_BTRFS_DEBUG to accommodate.

Thanks to Dave Sterba for the Kconfig bit.

Signed-off-by: Eric Sandeen <sandeen@redhat.com>
Reviewed-by: David Sterba <dsterba@suse.cz>
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
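The heart of the cleanup is that the non-debug build turns each helper into
an empty statement, so call sites stay free of #ifdef clutter.  A minimal
sketch of that gating pattern, compiled in user space (only the
CONFIG_BTRFS_DEBUG name comes from the commit; leak_debug_add and everything
else below is illustrative, not kernel API):

#include <stdio.h>

#ifdef CONFIG_BTRFS_DEBUG
/* Debug build: the helper does real work. */
static inline void leak_debug_add(const char *what)
{
	fprintf(stderr, "tracking %s\n", what);
}
#else
/*
 * Non-debug build: the helper compiles away to an empty statement.
 * The do {} while (0) form swallows the trailing semicolon safely,
 * even inside an unbraced if/else body.
 */
#define leak_debug_add(what) do {} while (0)
#endif

int main(void)
{
	/* The call site is identical in both configurations. */
	leak_debug_add("extent_state");
	return 0;
}

Build with and without -DCONFIG_BTRFS_DEBUG to see both configurations; the
caller never changes.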
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--	fs/btrfs/extent_io.c	113
1 file changed, 58 insertions(+), 55 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index d9a82f261e04..f110d12de2d5 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -24,12 +24,62 @@
 static struct kmem_cache *extent_state_cache;
 static struct kmem_cache *extent_buffer_cache;
 
+#ifdef CONFIG_BTRFS_DEBUG
 static LIST_HEAD(buffers);
 static LIST_HEAD(states);
 
-#define LEAK_DEBUG 0
-#if LEAK_DEBUG
 static DEFINE_SPINLOCK(leak_lock);
+
+static inline
+void btrfs_leak_debug_add(struct list_head *new, struct list_head *head)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&leak_lock, flags);
+	list_add(new, head);
+	spin_unlock_irqrestore(&leak_lock, flags);
+}
+
+static inline
+void btrfs_leak_debug_del(struct list_head *entry)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&leak_lock, flags);
+	list_del(entry);
+	spin_unlock_irqrestore(&leak_lock, flags);
+}
+
+static inline
+void btrfs_leak_debug_check(void)
+{
+	struct extent_state *state;
+	struct extent_buffer *eb;
+
+	while (!list_empty(&states)) {
+		state = list_entry(states.next, struct extent_state, leak_list);
+		printk(KERN_ERR "btrfs state leak: start %llu end %llu "
+		       "state %lu in tree %p refs %d\n",
+		       (unsigned long long)state->start,
+		       (unsigned long long)state->end,
+		       state->state, state->tree, atomic_read(&state->refs));
+		list_del(&state->leak_list);
+		kmem_cache_free(extent_state_cache, state);
+	}
+
+	while (!list_empty(&buffers)) {
+		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
+		printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
+		       "refs %d\n", (unsigned long long)eb->start,
+		       eb->len, atomic_read(&eb->refs));
+		list_del(&eb->leak_list);
+		kmem_cache_free(extent_buffer_cache, eb);
+	}
+}
+#else
+#define btrfs_leak_debug_add(new, head)	do {} while (0)
+#define btrfs_leak_debug_del(entry)	do {} while (0)
+#define btrfs_leak_debug_check()	do {} while (0)
 #endif
 
 #define BUFFER_LRU_MAX 64
@@ -84,29 +134,7 @@ free_state_cache:
 
 void extent_io_exit(void)
 {
-	struct extent_state *state;
-	struct extent_buffer *eb;
-
-	while (!list_empty(&states)) {
-		state = list_entry(states.next, struct extent_state, leak_list);
-		printk(KERN_ERR "btrfs state leak: start %llu end %llu "
-		       "state %lu in tree %p refs %d\n",
-		       (unsigned long long)state->start,
-		       (unsigned long long)state->end,
-		       state->state, state->tree, atomic_read(&state->refs));
-		list_del(&state->leak_list);
-		kmem_cache_free(extent_state_cache, state);
-
-	}
-
-	while (!list_empty(&buffers)) {
-		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
-		printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
-		       "refs %d\n", (unsigned long long)eb->start,
-		       eb->len, atomic_read(&eb->refs));
-		list_del(&eb->leak_list);
-		kmem_cache_free(extent_buffer_cache, eb);
-	}
+	btrfs_leak_debug_check();
 
 	/*
 	 * Make sure all delayed rcu free are flushed before we
@@ -134,9 +162,6 @@ void extent_io_tree_init(struct extent_io_tree *tree,
 static struct extent_state *alloc_extent_state(gfp_t mask)
 {
 	struct extent_state *state;
-#if LEAK_DEBUG
-	unsigned long flags;
-#endif
 
 	state = kmem_cache_alloc(extent_state_cache, mask);
 	if (!state)
@@ -144,11 +169,7 @@ static struct extent_state *alloc_extent_state(gfp_t mask)
 	state->state = 0;
 	state->private = 0;
 	state->tree = NULL;
-#if LEAK_DEBUG
-	spin_lock_irqsave(&leak_lock, flags);
-	list_add(&state->leak_list, &states);
-	spin_unlock_irqrestore(&leak_lock, flags);
-#endif
+	btrfs_leak_debug_add(&state->leak_list, &states);
 	atomic_set(&state->refs, 1);
 	init_waitqueue_head(&state->wq);
 	trace_alloc_extent_state(state, mask, _RET_IP_);
@@ -160,15 +181,8 @@ void free_extent_state(struct extent_state *state)
 	if (!state)
 		return;
 	if (atomic_dec_and_test(&state->refs)) {
-#if LEAK_DEBUG
-		unsigned long flags;
-#endif
 		WARN_ON(state->tree);
-#if LEAK_DEBUG
-		spin_lock_irqsave(&leak_lock, flags);
-		list_del(&state->leak_list);
-		spin_unlock_irqrestore(&leak_lock, flags);
-#endif
+		btrfs_leak_debug_del(&state->leak_list);
 		trace_free_extent_state(state, _RET_IP_);
 		kmem_cache_free(extent_state_cache, state);
 	}
@@ -4065,12 +4079,7 @@ out:
 
 static void __free_extent_buffer(struct extent_buffer *eb)
 {
-#if LEAK_DEBUG
-	unsigned long flags;
-	spin_lock_irqsave(&leak_lock, flags);
-	list_del(&eb->leak_list);
-	spin_unlock_irqrestore(&leak_lock, flags);
-#endif
+	btrfs_leak_debug_del(&eb->leak_list);
 	kmem_cache_free(extent_buffer_cache, eb);
 }
 
@@ -4080,9 +4089,6 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 				gfp_t mask)
 {
 	struct extent_buffer *eb = NULL;
-#if LEAK_DEBUG
-	unsigned long flags;
-#endif
 
 	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
 	if (eb == NULL)
@@ -4102,11 +4108,8 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 	init_waitqueue_head(&eb->write_lock_wq);
 	init_waitqueue_head(&eb->read_lock_wq);
 
-#if LEAK_DEBUG
-	spin_lock_irqsave(&leak_lock, flags);
-	list_add(&eb->leak_list, &buffers);
-	spin_unlock_irqrestore(&leak_lock, flags);
-#endif
+	btrfs_leak_debug_add(&eb->leak_list, &buffers);
+
 	spin_lock_init(&eb->refs_lock);
 	atomic_set(&eb->refs, 1);
 	atomic_set(&eb->io_pages, 0);
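
For readers outside the kernel tree, the technique itself generalizes:
thread every live object onto a global list at allocation, unlink it at
free, and report whatever is still on the list at shutdown.  A
self-contained user-space sketch of that intrusive leak list (a hand-rolled
stand-in for <linux/list.h>; every name below is illustrative):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal intrusive doubly linked list. */
struct list_head { struct list_head *next, *prev; };

static struct list_head states = { &states, &states };

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

struct extent_state {
	unsigned long long start, end;
	struct list_head leak_list;	/* links the object into `states` */
};

static struct extent_state *alloc_state(unsigned long long start,
					unsigned long long end)
{
	struct extent_state *s = malloc(sizeof(*s));

	if (!s)
		return NULL;
	s->start = start;
	s->end = end;
	list_add(&s->leak_list, &states);	/* track on allocation */
	return s;
}

static void free_state(struct extent_state *s)
{
	list_del(&s->leak_list);		/* untrack on free */
	free(s);
}

static void leak_check(void)
{
	while (states.next != &states) {
		/* Recover the object from its embedded list_head. */
		struct extent_state *s = (struct extent_state *)
			((char *)states.next -
			 offsetof(struct extent_state, leak_list));

		fprintf(stderr, "state leak: start %llu end %llu\n",
			s->start, s->end);
		free_state(s);
	}
}

int main(void)
{
	struct extent_state *a = alloc_state(0, 4095);

	alloc_state(4096, 8191);	/* deliberately leaked */
	free_state(a);
	leak_check();			/* reports only the second object */
	return 0;
}

Because the list_head lives inside the tracked structure, tracking costs no
extra allocation, which is also why the commit can drop the field entirely
when CONFIG_BTRFS_DEBUG is off.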