path: root/kernel/trace
author	Frederic Weisbecker <fweisbec@gmail.com>	2009-01-06 15:33:30 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-07 04:45:14 -0500
commit	ff288b274a9b383046fdbda4be3067daba4d5fe8 (patch)
tree	9b140f23d6e3bf325ed4f8978a3ddc14c8ccd16d /kernel/trace
parent	431aa3fbf5bbe3be79809c7e603c2ed2ac64b015 (diff)
tracing/ftrace: fix a memory leak in stat tracing
Impact: fix memory leak

This patch fixes a memory leak inside reset_stat_list(). The freeing
loop iterated only once.

Also turn the stat_list into a simple struct list_head, which
simplifies the code and avoids an unused static pointer.

Reported-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
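For context on the pattern the fix relies on: freeing nodes while walking a linked list is only safe if the next pointer is saved before the current node is released, which is what list_for_each_entry_safe() does with its extra cursor and what the old open-coded loop got wrong. Below is a minimal userspace sketch of that idea; the stat_node type and free_all() helper are made up for illustration and are not the kernel's list API.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for a kernel list node; illustration only. */
struct stat_node {
	int value;
	struct stat_node *next;
};

/*
 * Free every node: grab 'next' BEFORE freeing the current node,
 * the way list_for_each_entry_safe() keeps a second cursor.
 */
static void free_all(struct stat_node *head)
{
	struct stat_node *node = head, *next;

	while (node) {
		next = node->next;	/* saved before the node is released */
		free(node);
		node = next;
	}
}

int main(void)
{
	struct stat_node *head = NULL;

	/* Build a short list, then tear it down without leaking. */
	for (int i = 0; i < 3; i++) {
		struct stat_node *n = malloc(sizeof(*n));
		if (!n)
			break;
		n->value = i;
		n->next = head;
		head = n;
	}
	free_all(head);
	return 0;
}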
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/trace_stat.c	39
1 file changed, 15 insertions, 24 deletions
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index 6f194a33a64a..4cb4ff27646d 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -21,7 +21,7 @@ struct trace_stat_list {
 	void *stat;
 };
 
-static struct trace_stat_list stat_list;
+static LIST_HEAD(stat_list);
 
 /*
  * This is a copy of the current tracer to avoid racy
@@ -39,22 +39,12 @@ static DEFINE_MUTEX(stat_list_mutex);
 
 static void reset_stat_list(void)
 {
-	struct trace_stat_list *node;
-	struct list_head *next;
+	struct trace_stat_list *node, *next;
 
-	if (list_empty(&stat_list.list))
-		return;
-
-	node = list_entry(stat_list.list.next, struct trace_stat_list, list);
-	next = node->list.next;
-
-	while (&node->list != next) {
+	list_for_each_entry_safe(node, next, &stat_list, list)
 		kfree(node);
-		node = list_entry(next, struct trace_stat_list, list);
-	}
-	kfree(node);
 
-	INIT_LIST_HEAD(&stat_list.list);
+	INIT_LIST_HEAD(&stat_list);
 }
 
 void init_tracer_stat(struct tracer *trace)
@@ -107,7 +97,7 @@ static int stat_seq_init(void)
 	}
 
 	INIT_LIST_HEAD(&new_entry->list);
-	list_add(&new_entry->list, &stat_list.list);
+	list_add(&new_entry->list, &stat_list);
 	new_entry->stat = current_tracer.stat_start();
 
 	prev_stat = new_entry->stat;
@@ -130,7 +120,7 @@ static int stat_seq_init(void)
 		if (!new_entry->stat)
 			break;
 
-		list_for_each_entry(iter_entry, &stat_list.list, list) {
+		list_for_each_entry(iter_entry, &stat_list, list) {
 			/* Insertion with a descendent sorting */
 			if (current_tracer.stat_cmp(new_entry->stat,
 					iter_entry->stat) > 0) {
@@ -141,7 +131,7 @@ static int stat_seq_init(void)
 
 			/* The current smaller value */
 			} else if (list_is_last(&iter_entry->list,
-					&stat_list.list)) {
+					&stat_list)) {
 				list_add(&new_entry->list, &iter_entry->list);
 				break;
 			}
@@ -162,7 +152,7 @@ exit_free_list:
 
 static void *stat_seq_start(struct seq_file *s, loff_t *pos)
 {
-	struct trace_stat_list *l = (struct trace_stat_list *)s->private;
+	struct list_head *l = (struct list_head *)s->private;
 
 	/* Prevent from tracer switch or stat_list modification */
 	mutex_lock(&stat_list_mutex);
@@ -171,14 +161,14 @@ static void *stat_seq_start(struct seq_file *s, loff_t *pos)
 	if (!*pos && current_tracer.stat_headers)
 		current_tracer.stat_headers(s);
 
-	return seq_list_start(&l->list, *pos);
+	return seq_list_start(l, *pos);
 }
 
 static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos)
 {
-	struct trace_stat_list *l = (struct trace_stat_list *)s->private;
+	struct list_head *l = (struct list_head *)s->private;
 
-	return seq_list_next(p, &l->list, pos);
+	return seq_list_next(p, l, pos);
 }
 
 static void stat_seq_stop(struct seq_file *m, void *p)
@@ -188,8 +178,10 @@ static void stat_seq_stop(struct seq_file *m, void *p)
 
 static int stat_seq_show(struct seq_file *s, void *v)
 {
-	struct trace_stat_list *l = list_entry(v, struct trace_stat_list, list);
-	return current_tracer.stat_show(s, l->stat);
+	struct trace_stat_list *entry =
+		list_entry(v, struct trace_stat_list, list);
+
+	return current_tracer.stat_show(s, entry->stat);
 }
 
 static const struct seq_operations trace_stat_seq_ops = {
@@ -237,7 +229,6 @@ static int __init tracing_stat_init(void)
 	struct dentry *d_tracing;
 	struct dentry *entry;
 
-	INIT_LIST_HEAD(&stat_list.list);
 	d_tracing = tracing_init_dentry();
 
 	entry = debugfs_create_file("trace_stat", 0444, d_tracing,
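The other half of the change is visible in the first and last hunks: declaring the head with LIST_HEAD() initializes it at compile time, so the runtime INIT_LIST_HEAD() call removed from tracing_stat_init() is no longer needed. A rough userspace sketch of the same idea follows; the struct and macros here are hand-rolled stand-ins mirroring what the kernel's <linux/list.h> provides, not the kernel headers themselves.

#include <stdio.h>

/* Hand-rolled stand-in for the kernel's struct list_head. */
struct list_head {
	struct list_head *next, *prev;
};

/*
 * Static initialization: the head points at itself, i.e. an empty
 * circular list, with no runtime INIT_LIST_HEAD() call required.
 * This mirrors what the kernel's LIST_HEAD() macro expands to.
 */
#define LIST_HEAD_INIT(name)	{ &(name), &(name) }
#define LIST_HEAD(name)		struct list_head name = LIST_HEAD_INIT(name)

static LIST_HEAD(stat_list);

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

int main(void)
{
	/* The statically declared head is already a valid empty list. */
	printf("stat_list empty: %d\n", list_empty(&stat_list));
	return 0;
}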