aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace/trace_stat.c
diff options
context:
space:
mode:
authorFrederic Weisbecker <fweisbec@gmail.com>2009-05-16 00:24:36 -0400
committerFrederic Weisbecker <fweisbec@gmail.com>2009-06-01 19:17:35 -0400
commit8f184f27300f66f6dcc8296c2dae7a1fbe8429c9 (patch)
treea21aa4d88a11217bdd9eaaf31b2189d8b6b45b5b /kernel/trace/trace_stat.c
parent0d64f8342de26d02451900b1aad94716fe92c4ab (diff)
tracing/stat: replace linked list by an rbtree for sorting
When the stat tracing framework prepares the entries from a tracer to output them to the user, it starts by computing a linear sort through a linked list to give the entries ordered by relevance to the user. This is quite ugly and causes a small latency when we begin to read the file. This patch changes that by turning the linked list into a red-black tree. Although the whole iteration using the start and next tracer callbacks while opening the file remains the same, it is now much faster and more scalable. The rbtree guarantees O(log(n)) insertions whereas a linked list with linear sorting brought us a O(n) despair. Now the (visible) latency has disappeared. [ Impact: kill the latency while starting to read a stat tracer file ] Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Diffstat (limited to 'kernel/trace/trace_stat.c')
-rw-r--r--kernel/trace/trace_stat.c140
1 files changed, 100 insertions, 40 deletions
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index 3b6816be825d..0bd0fc82da5d 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Infrastructure for statistic tracing (histogram output). 2 * Infrastructure for statistic tracing (histogram output).
3 * 3 *
4 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com> 4 * Copyright (C) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
5 * 5 *
6 * Based on the code from trace_branch.c which is 6 * Based on the code from trace_branch.c which is
7 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com> 7 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
@@ -10,14 +10,19 @@
10 10
11 11
12#include <linux/list.h> 12#include <linux/list.h>
13#include <linux/rbtree.h>
13#include <linux/debugfs.h> 14#include <linux/debugfs.h>
14#include "trace_stat.h" 15#include "trace_stat.h"
15#include "trace.h" 16#include "trace.h"
16 17
17 18
18/* List of stat entries from a tracer */ 19/*
19struct trace_stat_list { 20 * List of stat red-black nodes from a tracer
20 struct list_head list; 21 * We use such a tree to quickly sort the stat
22 * entries from the tracer.
23 */
24struct stat_node {
25 struct rb_node node;
21 void *stat; 26 void *stat;
22}; 27};
23 28
@@ -25,7 +30,7 @@ struct trace_stat_list {
25struct stat_session { 30struct stat_session {
26 struct list_head session_list; 31 struct list_head session_list;
27 struct tracer_stat *ts; 32 struct tracer_stat *ts;
28 struct list_head stat_list; 33 struct rb_root stat_root;
29 struct mutex stat_mutex; 34 struct mutex stat_mutex;
30 struct dentry *file; 35 struct dentry *file;
31}; 36};
@@ -37,15 +42,45 @@ static DEFINE_MUTEX(all_stat_sessions_mutex);
37/* The root directory for all stat files */ 42/* The root directory for all stat files */
38static struct dentry *stat_dir; 43static struct dentry *stat_dir;
39 44
45/*
46 * Iterate through the rbtree using a post order traversal path
47 * to release the next node.
48 * It won't necessarily release one at each iteration
49 * but it will at least advance closer to the next one
50 * to be released.
51 */
52static struct rb_node *release_next(struct rb_node *node)
53{
54 struct stat_node *snode;
55 struct rb_node *parent = rb_parent(node);
56
57 if (node->rb_left)
58 return node->rb_left;
59 else if (node->rb_right)
60 return node->rb_right;
61 else {
62 if (!parent)
63 return NULL;
64 if (parent->rb_left == node)
65 parent->rb_left = NULL;
66 else
67 parent->rb_right = NULL;
68
69 snode = container_of(node, struct stat_node, node);
70 kfree(snode);
71
72 return parent;
73 }
74}
40 75
41static void reset_stat_session(struct stat_session *session) 76static void reset_stat_session(struct stat_session *session)
42{ 77{
43 struct trace_stat_list *node, *next; 78 struct rb_node *node = session->stat_root.rb_node;
44 79
45 list_for_each_entry_safe(node, next, &session->stat_list, list) 80 while (node)
46 kfree(node); 81 node = release_next(node);
47 82
48 INIT_LIST_HEAD(&session->stat_list); 83 session->stat_root = RB_ROOT;
49} 84}
50 85
51static void destroy_session(struct stat_session *session) 86static void destroy_session(struct stat_session *session)
@@ -56,6 +91,35 @@ static void destroy_session(struct stat_session *session)
56 kfree(session); 91 kfree(session);
57} 92}
58 93
94typedef int (*cmp_stat_t)(void *, void *);
95
96static void
97insert_stat(struct rb_root *root, struct stat_node *data, cmp_stat_t cmp)
98{
99 struct rb_node **new = &(root->rb_node), *parent = NULL;
100
101 /*
102 * Figure out where to put new node
103 * This is a descendent sorting
104 */
105 while (*new) {
106 struct stat_node *this;
107 int result;
108
109 this = container_of(*new, struct stat_node, node);
110 result = cmp(data->stat, this->stat);
111
112 parent = *new;
113 if (result >= 0)
114 new = &((*new)->rb_left);
115 else
116 new = &((*new)->rb_right);
117 }
118
119 rb_link_node(&data->node, parent, new);
120 rb_insert_color(&data->node, root);
121}
122
59/* 123/*
60 * For tracers that don't provide a stat_cmp callback. 124 * For tracers that don't provide a stat_cmp callback.
61 * This one will force an immediate insertion on tail of 125 * This one will force an immediate insertion on tail of
@@ -73,8 +137,9 @@ static int dummy_cmp(void *p1, void *p2)
73 */ 137 */
74static int stat_seq_init(struct stat_session *session) 138static int stat_seq_init(struct stat_session *session)
75{ 139{
76 struct trace_stat_list *iter_entry, *new_entry;
77 struct tracer_stat *ts = session->ts; 140 struct tracer_stat *ts = session->ts;
141 struct stat_node *new_entry;
142 struct rb_root *root;
78 void *stat; 143 void *stat;
79 int ret = 0; 144 int ret = 0;
80 int i; 145 int i;
@@ -93,15 +158,13 @@ static int stat_seq_init(struct stat_session *session)
93 * The first entry. Actually this is the second, but the first 158 * The first entry. Actually this is the second, but the first
94 * one (the stat_list head) is pointless. 159 * one (the stat_list head) is pointless.
95 */ 160 */
96 new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL); 161 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
97 if (!new_entry) { 162 if (!new_entry) {
98 ret = -ENOMEM; 163 ret = -ENOMEM;
99 goto exit; 164 goto exit;
100 } 165 }
101 166 root = &session->stat_root;
102 INIT_LIST_HEAD(&new_entry->list); 167 insert_stat(root, new_entry, dummy_cmp);
103
104 list_add(&new_entry->list, &session->stat_list);
105 168
106 new_entry->stat = stat; 169 new_entry->stat = stat;
107 170
@@ -116,31 +179,17 @@ static int stat_seq_init(struct stat_session *session)
116 if (!stat) 179 if (!stat)
117 break; 180 break;
118 181
119 new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL); 182 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
120 if (!new_entry) { 183 if (!new_entry) {
121 ret = -ENOMEM; 184 ret = -ENOMEM;
122 goto exit_free_list; 185 goto exit_free_list;
123 } 186 }
124 187
125 INIT_LIST_HEAD(&new_entry->list);
126 new_entry->stat = stat; 188 new_entry->stat = stat;
127 189
128 list_for_each_entry_reverse(iter_entry, &session->stat_list, 190 insert_stat(root, new_entry, ts->stat_cmp);
129 list) {
130
131 /* Insertion with a descendent sorting */
132 if (ts->stat_cmp(iter_entry->stat,
133 new_entry->stat) >= 0) {
134
135 list_add(&new_entry->list, &iter_entry->list);
136 break;
137 }
138 }
139
140 /* The current larger value */
141 if (list_empty(&new_entry->list))
142 list_add(&new_entry->list, &session->stat_list);
143 } 191 }
192
144exit: 193exit:
145 mutex_unlock(&session->stat_mutex); 194 mutex_unlock(&session->stat_mutex);
146 return ret; 195 return ret;
@@ -155,25 +204,38 @@ exit_free_list:
155static void *stat_seq_start(struct seq_file *s, loff_t *pos) 204static void *stat_seq_start(struct seq_file *s, loff_t *pos)
156{ 205{
157 struct stat_session *session = s->private; 206 struct stat_session *session = s->private;
207 struct rb_node *node;
208 int i;
158 209
159 /* Prevent from tracer switch or stat_list modification */ 210 /* Prevent from tracer switch or stat_list modification */
160 mutex_lock(&session->stat_mutex); 211 mutex_lock(&session->stat_mutex);
161 212
162 /* If we are in the beginning of the file, print the headers */ 213 /* If we are in the beginning of the file, print the headers */
163 if (!*pos && session->ts->stat_headers) 214 if (!*pos && session->ts->stat_headers) {
215 (*pos)++;
164 return SEQ_START_TOKEN; 216 return SEQ_START_TOKEN;
217 }
165 218
166 return seq_list_start(&session->stat_list, *pos); 219 node = rb_first(&session->stat_root);
220 for (i = 0; node && i < *pos; i++)
221 node = rb_next(node);
222
223 (*pos)++;
224
225 return node;
167} 226}
168 227
169static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos) 228static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos)
170{ 229{
171 struct stat_session *session = s->private; 230 struct stat_session *session = s->private;
231 struct rb_node *node = p;
232
233 (*pos)++;
172 234
173 if (p == SEQ_START_TOKEN) 235 if (p == SEQ_START_TOKEN)
174 return seq_list_start(&session->stat_list, *pos); 236 return rb_first(&session->stat_root);
175 237
176 return seq_list_next(p, &session->stat_list, pos); 238 return rb_next(node);
177} 239}
178 240
179static void stat_seq_stop(struct seq_file *s, void *p) 241static void stat_seq_stop(struct seq_file *s, void *p)
@@ -185,7 +247,7 @@ static void stat_seq_stop(struct seq_file *s, void *p)
185static int stat_seq_show(struct seq_file *s, void *v) 247static int stat_seq_show(struct seq_file *s, void *v)
186{ 248{
187 struct stat_session *session = s->private; 249 struct stat_session *session = s->private;
188 struct trace_stat_list *l = list_entry(v, struct trace_stat_list, list); 250 struct stat_node *l = container_of(v, struct stat_node, node);
189 251
190 if (v == SEQ_START_TOKEN) 252 if (v == SEQ_START_TOKEN)
191 return session->ts->stat_headers(s); 253 return session->ts->stat_headers(s);
@@ -286,15 +348,13 @@ int register_stat_tracer(struct tracer_stat *trace)
286 mutex_unlock(&all_stat_sessions_mutex); 348 mutex_unlock(&all_stat_sessions_mutex);
287 349
288 /* Init the session */ 350 /* Init the session */
289 session = kmalloc(sizeof(struct stat_session), GFP_KERNEL); 351 session = kzalloc(sizeof(*session), GFP_KERNEL);
290 if (!session) 352 if (!session)
291 return -ENOMEM; 353 return -ENOMEM;
292 354
293 session->ts = trace; 355 session->ts = trace;
294 INIT_LIST_HEAD(&session->session_list); 356 INIT_LIST_HEAD(&session->session_list);
295 INIT_LIST_HEAD(&session->stat_list);
296 mutex_init(&session->stat_mutex); 357 mutex_init(&session->stat_mutex);
297 session->file = NULL;
298 358
299 ret = init_stat_file(session); 359 ret = init_stat_file(session);
300 if (ret) { 360 if (ret) {