-rw-r--r-- | include/trace/events/workqueue.h | 100
-rw-r--r-- | include/trace/workqueue.h        |  25
-rw-r--r-- | kernel/trace/trace_stat.c        | 206
-rw-r--r-- | kernel/trace/trace_workqueue.c   |  23
-rw-r--r-- | kernel/workqueue.c               |  11
5 files changed, 234 insertions, 131 deletions
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
new file mode 100644
index 000000000000..035f1bff288e
--- /dev/null
+++ b/include/trace/events/workqueue.h
@@ -0,0 +1,100 @@
+#if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_WORKQUEUE_H
+
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM workqueue
+
+TRACE_EVENT(workqueue_insertion,
+
+	TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
+
+	TP_ARGS(wq_thread, work),
+
+	TP_STRUCT__entry(
+		__array(char, thread_comm, TASK_COMM_LEN)
+		__field(pid_t, thread_pid)
+		__field(work_func_t, func)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN);
+		__entry->thread_pid = wq_thread->pid;
+		__entry->func = work->func;
+	),
+
+	TP_printk("thread=%s:%d func=%pF", __entry->thread_comm,
+		__entry->thread_pid, __entry->func)
+);
+
+TRACE_EVENT(workqueue_execution,
+
+	TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
+
+	TP_ARGS(wq_thread, work),
+
+	TP_STRUCT__entry(
+		__array(char, thread_comm, TASK_COMM_LEN)
+		__field(pid_t, thread_pid)
+		__field(work_func_t, func)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN);
+		__entry->thread_pid = wq_thread->pid;
+		__entry->func = work->func;
+	),
+
+	TP_printk("thread=%s:%d func=%pF", __entry->thread_comm,
+		__entry->thread_pid, __entry->func)
+);
+
+/* Trace the creation of one workqueue thread on a cpu */
+TRACE_EVENT(workqueue_creation,
+
+	TP_PROTO(struct task_struct *wq_thread, int cpu),
+
+	TP_ARGS(wq_thread, cpu),
+
+	TP_STRUCT__entry(
+		__array(char, thread_comm, TASK_COMM_LEN)
+		__field(pid_t, thread_pid)
+		__field(int, cpu)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN);
+		__entry->thread_pid = wq_thread->pid;
+		__entry->cpu = cpu;
+	),
+
+	TP_printk("thread=%s:%d cpu=%d", __entry->thread_comm,
+		__entry->thread_pid, __entry->cpu)
+);
+
+TRACE_EVENT(workqueue_destruction,
+
+	TP_PROTO(struct task_struct *wq_thread),
+
+	TP_ARGS(wq_thread),
+
+	TP_STRUCT__entry(
+		__array(char, thread_comm, TASK_COMM_LEN)
+		__field(pid_t, thread_pid)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN);
+		__entry->thread_pid = wq_thread->pid;
+	),
+
+	TP_printk("thread=%s:%d", __entry->thread_comm, __entry->thread_pid)
+);
+
+#endif /* _TRACE_WORKQUEUE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
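
Each TRACE_EVENT(name, ...) above expands into a trace_name() static inline plus the ftrace plumbing that records the TP_STRUCT__entry fields and renders the TP_printk() format. A minimal sketch of a call site, assuming only what the header declares (the surrounding function is illustrative, not from the patch):

#include <trace/events/workqueue.h>

static void example_insertion_site(struct task_struct *thread,
				   struct work_struct *work)
{
	/* Generated by TRACE_EVENT(workqueue_insertion, ...) above;
	 * compiles down to a disabled-branch no-op when the event is off. */
	trace_workqueue_insertion(thread, work);
}
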
diff --git a/include/trace/workqueue.h b/include/trace/workqueue.h
deleted file mode 100644
index 7626523deeba..000000000000
--- a/include/trace/workqueue.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef __TRACE_WORKQUEUE_H
-#define __TRACE_WORKQUEUE_H
-
-#include <linux/tracepoint.h>
-#include <linux/workqueue.h>
-#include <linux/sched.h>
-
-DECLARE_TRACE(workqueue_insertion,
-	TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
-	TP_ARGS(wq_thread, work));
-
-DECLARE_TRACE(workqueue_execution,
-	TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
-	TP_ARGS(wq_thread, work));
-
-/* Trace the creation of one workqueue thread on a cpu */
-DECLARE_TRACE(workqueue_creation,
-	TP_PROTO(struct task_struct *wq_thread, int cpu),
-	TP_ARGS(wq_thread, cpu));
-
-DECLARE_TRACE(workqueue_destruction,
-	TP_PROTO(struct task_struct *wq_thread),
-	TP_ARGS(wq_thread));
-
-#endif /* __TRACE_WORKQUEUE_H */
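
The deleted header used the older two-step tracepoint API: DECLARE_TRACE() gives every includer the trace_*() inline, and exactly one .c file must supply a matching DEFINE_TRACE() (those are the definitions removed from kernel/workqueue.c at the end of this patch). A rough sketch of the old pairing, for contrast with the self-contained TRACE_EVENT() above:

/* include/trace/workqueue.h: declare the hook */
DECLARE_TRACE(workqueue_insertion,
	TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
	TP_ARGS(wq_thread, work));

/* kernel/workqueue.c: emit the single definition */
DEFINE_TRACE(workqueue_insertion);

Call sites are unchanged by the conversion; only the declaration mechanism moves, and TRACE_EVENT() additionally generates the structured event record and format string that DECLARE_TRACE() left to hand-written probes.
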
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index fdde3a4a94cd..c00643733f4c 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -1,7 +1,7 @@
 /*
  * Infrastructure for statistic tracing (histogram output).
  *
- * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
+ * Copyright (C) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
  *
  * Based on the code from trace_branch.c which is
  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
@@ -10,22 +10,27 @@


 #include <linux/list.h>
+#include <linux/rbtree.h>
 #include <linux/debugfs.h>
 #include "trace_stat.h"
 #include "trace.h"


-/* List of stat entries from a tracer */
-struct trace_stat_list {
-	struct list_head list;
+/*
+ * List of stat red-black nodes from a tracer
+ * We use such a tree to quickly sort the stat
+ * entries from the tracer.
+ */
+struct stat_node {
+	struct rb_node node;
 	void *stat;
 };

 /* A stat session is the stats output in one file */
-struct tracer_stat_session {
+struct stat_session {
 	struct list_head session_list;
 	struct tracer_stat *ts;
-	struct list_head stat_list;
+	struct rb_root stat_root;
 	struct mutex stat_mutex;
 	struct dentry *file;
 };
@@ -37,18 +42,48 @@ static DEFINE_MUTEX(all_stat_sessions_mutex);
 /* The root directory for all stat files */
 static struct dentry *stat_dir;

+/*
+ * Iterate through the rbtree using a post order traversal path
+ * to release the next node.
+ * It won't necessarily release one at each iteration,
+ * but it will at least advance closer to the next one
+ * to be released.
+ */
+static struct rb_node *release_next(struct rb_node *node)
+{
+	struct stat_node *snode;
+	struct rb_node *parent = rb_parent(node);
+
+	if (node->rb_left)
+		return node->rb_left;
+	else if (node->rb_right)
+		return node->rb_right;
+	else {
+		if (!parent)
+			;
+		else if (parent->rb_left == node)
+			parent->rb_left = NULL;
+		else
+			parent->rb_right = NULL;
+
+		snode = container_of(node, struct stat_node, node);
+		kfree(snode);
+
+		return parent;
+	}
+}

-static void reset_stat_session(struct tracer_stat_session *session)
+static void reset_stat_session(struct stat_session *session)
 {
-	struct trace_stat_list *node, *next;
+	struct rb_node *node = session->stat_root.rb_node;

-	list_for_each_entry_safe(node, next, &session->stat_list, list)
-		kfree(node);
+	while (node)
+		node = release_next(node);

-	INIT_LIST_HEAD(&session->stat_list);
+	session->stat_root = RB_ROOT;
 }

-static void destroy_session(struct tracer_stat_session *session)
+static void destroy_session(struct stat_session *session)
 {
 	debugfs_remove(session->file);
 	reset_stat_session(session);
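
release_next() tears the tree down without recursion and without rb_next(), which would have to revisit nodes after they are freed. A self-contained user-space analogue of the same idea, using a plain parent-linked binary tree in place of struct rb_node (all names here are illustrative):

#include <stdlib.h>

struct tnode {
	struct tnode *parent, *left, *right;
};

/* Descend while a child exists; otherwise free the leaf, detach it
 * from its parent, and resume from the parent.  Each call frees at
 * most one node but always makes progress toward the next one. */
static struct tnode *release_next_node(struct tnode *node)
{
	struct tnode *parent = node->parent;

	if (node->left)
		return node->left;
	if (node->right)
		return node->right;

	if (parent) {
		if (parent->left == node)
			parent->left = NULL;
		else
			parent->right = NULL;
	}
	free(node);
	return parent;
}

static void release_tree(struct tnode *root)
{
	while (root)
		root = release_next_node(root);
}
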
@@ -56,25 +91,60 @@ static void destroy_session(struct tracer_stat_session *session)
 	kfree(session);
 }

+typedef int (*cmp_stat_t)(void *, void *);
+
+static int insert_stat(struct rb_root *root, void *stat, cmp_stat_t cmp)
+{
+	struct rb_node **new = &(root->rb_node), *parent = NULL;
+	struct stat_node *data;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+	data->stat = stat;
+
+	/*
+	 * Figure out where to put the new node.
+	 * This is a descending sort.
+	 */
+	while (*new) {
+		struct stat_node *this;
+		int result;
+
+		this = container_of(*new, struct stat_node, node);
+		result = cmp(data->stat, this->stat);
+
+		parent = *new;
+		if (result >= 0)
+			new = &((*new)->rb_left);
+		else
+			new = &((*new)->rb_right);
+	}
+
+	rb_link_node(&data->node, parent, new);
+	rb_insert_color(&data->node, root);
+	return 0;
+}
+
 /*
  * For tracers that don't provide a stat_cmp callback.
- * This one will force an immediate insertion on tail of
- * the list.
+ * This one will force an insertion as right-most node
+ * in the rbtree.
  */
 static int dummy_cmp(void *p1, void *p2)
 {
-	return 1;
+	return -1;
 }

 /*
- * Initialize the stat list at each trace_stat file opening.
+ * Initialize the stat rbtree at each trace_stat file opening.
  * All of these copies and sorting are required on all opening
  * since the stats could have changed between two file sessions.
  */
-static int stat_seq_init(struct tracer_stat_session *session)
+static int stat_seq_init(struct stat_session *session)
 {
-	struct trace_stat_list *iter_entry, *new_entry;
 	struct tracer_stat *ts = session->ts;
+	struct rb_root *root = &session->stat_root;
 	void *stat;
 	int ret = 0;
 	int i;
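
insert_stat() is the textbook rb_link_node()/rb_insert_color() insertion walk, with cmp() >= 0 steering left so that an in-order traversal visits entries in descending order; dummy_cmp() returning -1 therefore sends every entry to the right-most slot, preserving insertion order. A user-space sketch of the same walk on an unbalanced binary search tree (the rbtree re-coloring step has no analogue here; names are illustrative):

#include <stdlib.h>

struct snode {
	struct snode *left, *right;
	void *stat;
};

typedef int (*cmp_stat_t)(void *, void *);

static int insert_desc(struct snode **root, void *stat, cmp_stat_t cmp)
{
	struct snode **link = root;
	struct snode *data = calloc(1, sizeof(*data));

	if (!data)
		return -1;
	data->stat = stat;

	/* Walk down: >= 0 goes left, < 0 goes right, so an in-order
	 * traversal yields largest-to-smallest. */
	while (*link) {
		if (cmp(data->stat, (*link)->stat) >= 0)
			link = &(*link)->left;
		else
			link = &(*link)->right;
	}
	*link = data;
	return 0;
}
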
@@ -89,25 +159,12 @@ static int stat_seq_init(struct tracer_stat_session *session)
 	if (!stat)
 		goto exit;

-	/*
-	 * The first entry. Actually this is the second, but the first
-	 * one (the stat_list head) is pointless.
-	 */
-	new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL);
-	if (!new_entry) {
-		ret = -ENOMEM;
+	ret = insert_stat(root, stat, ts->stat_cmp);
+	if (ret)
 		goto exit;
-	}
-
-	INIT_LIST_HEAD(&new_entry->list);
-
-	list_add(&new_entry->list, &session->stat_list);
-
-	new_entry->stat = stat;

 	/*
-	 * Iterate over the tracer stat entries and store them in a sorted
-	 * list.
+	 * Iterate over the tracer stat entries and store them in an rbtree.
 	 */
 	for (i = 1; ; i++) {
 		stat = ts->stat_next(stat, i);
@@ -116,36 +173,16 @@ static int stat_seq_init(struct tracer_stat_session *session)
 		if (!stat)
 			break;

-		new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL);
-		if (!new_entry) {
-			ret = -ENOMEM;
-			goto exit_free_list;
-		}
-
-		INIT_LIST_HEAD(&new_entry->list);
-		new_entry->stat = stat;
-
-		list_for_each_entry_reverse(iter_entry, &session->stat_list,
-				list) {
-
-			/* Insertion with a descendent sorting */
-			if (ts->stat_cmp(iter_entry->stat,
-					new_entry->stat) >= 0) {
-
-				list_add(&new_entry->list, &iter_entry->list);
-				break;
-			}
-		}
-
-		/* The current larger value */
-		if (list_empty(&new_entry->list))
-			list_add(&new_entry->list, &session->stat_list);
+		ret = insert_stat(root, stat, ts->stat_cmp);
+		if (ret)
+			goto exit_free_rbtree;
 	}
+
 exit:
 	mutex_unlock(&session->stat_mutex);
 	return ret;

-exit_free_list:
+exit_free_rbtree:
 	reset_stat_session(session);
 	mutex_unlock(&session->stat_mutex);
 	return ret;
@@ -154,38 +191,51 @@ exit_free_list:

 static void *stat_seq_start(struct seq_file *s, loff_t *pos)
 {
-	struct tracer_stat_session *session = s->private;
+	struct stat_session *session = s->private;
+	struct rb_node *node;
+	int i;

-	/* Prevent from tracer switch or stat_list modification */
+	/* Protect against a tracer switch or rbtree modification */
 	mutex_lock(&session->stat_mutex);

 	/* If we are in the beginning of the file, print the headers */
-	if (!*pos && session->ts->stat_headers)
+	if (!*pos && session->ts->stat_headers) {
+		(*pos)++;
 		return SEQ_START_TOKEN;
+	}

-	return seq_list_start(&session->stat_list, *pos);
+	node = rb_first(&session->stat_root);
+	for (i = 0; node && i < *pos; i++)
+		node = rb_next(node);
+
+	(*pos)++;
+
+	return node;
 }

 static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos)
 {
-	struct tracer_stat_session *session = s->private;
+	struct stat_session *session = s->private;
+	struct rb_node *node = p;
+
+	(*pos)++;

 	if (p == SEQ_START_TOKEN)
-		return seq_list_start(&session->stat_list, *pos);
+		return rb_first(&session->stat_root);

-	return seq_list_next(p, &session->stat_list, pos);
+	return rb_next(node);
 }

 static void stat_seq_stop(struct seq_file *s, void *p)
 {
-	struct tracer_stat_session *session = s->private;
+	struct stat_session *session = s->private;
 	mutex_unlock(&session->stat_mutex);
 }

 static int stat_seq_show(struct seq_file *s, void *v)
 {
-	struct tracer_stat_session *session = s->private;
-	struct trace_stat_list *l = list_entry(v, struct trace_stat_list, list);
+	struct stat_session *session = s->private;
+	struct stat_node *l = container_of(v, struct stat_node, node);

 	if (v == SEQ_START_TOKEN)
 		return session->ts->stat_headers(s);
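
These four callbacks implement the standard seq_file contract: ->start() positions the iterator at *pos (returning SEQ_START_TOKEN for the header row), ->next() advances it and bumps *pos, ->stop() drops the lock, and ->show() prints one entry. For reference, the ops table they plug into, reconstructed from the seq_open() call below (not part of this patch):

static struct seq_operations trace_stat_seq_ops = {
	.start	= stat_seq_start,
	.next	= stat_seq_next,
	.stop	= stat_seq_stop,
	.show	= stat_seq_show,
};

Since an rbtree has no positional index, stat_seq_start() resumes at offset *pos by re-walking from rb_first(), an O(pos) step that is acceptable for the small entry counts involved.
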
@@ -205,7 +255,7 @@ static int tracing_stat_open(struct inode *inode, struct file *file)
 {
 	int ret;

-	struct tracer_stat_session *session = inode->i_private;
+	struct stat_session *session = inode->i_private;

 	ret = seq_open(file, &trace_stat_seq_ops);
 	if (!ret) {
@@ -218,11 +268,11 @@ static int tracing_stat_open(struct inode *inode, struct file *file)
 }

 /*
- * Avoid consuming memory with our now useless list.
+ * Avoid consuming memory with our now useless rbtree.
  */
 static int tracing_stat_release(struct inode *i, struct file *f)
 {
-	struct tracer_stat_session *session = i->i_private;
+	struct stat_session *session = i->i_private;

 	mutex_lock(&session->stat_mutex);
 	reset_stat_session(session);
@@ -251,7 +301,7 @@ static int tracing_stat_init(void)
 	return 0;
 }

-static int init_stat_file(struct tracer_stat_session *session)
+static int init_stat_file(struct stat_session *session)
 {
 	if (!stat_dir && tracing_stat_init())
 		return -ENODEV;
@@ -266,7 +316,7 @@ static int init_stat_file(struct tracer_stat_session *session)

 int register_stat_tracer(struct tracer_stat *trace)
 {
-	struct tracer_stat_session *session, *node, *tmp;
+	struct stat_session *session, *node;
 	int ret;

 	if (!trace)
@@ -277,7 +327,7 @@ int register_stat_tracer(struct tracer_stat *trace)

 	/* Already registered? */
 	mutex_lock(&all_stat_sessions_mutex);
-	list_for_each_entry_safe(node, tmp, &all_stat_sessions, session_list) {
+	list_for_each_entry(node, &all_stat_sessions, session_list) {
 		if (node->ts == trace) {
 			mutex_unlock(&all_stat_sessions_mutex);
 			return -EINVAL;
@@ -286,15 +336,13 @@ int register_stat_tracer(struct tracer_stat *trace)
 	mutex_unlock(&all_stat_sessions_mutex);

 	/* Init the session */
-	session = kmalloc(sizeof(struct tracer_stat_session), GFP_KERNEL);
+	session = kzalloc(sizeof(*session), GFP_KERNEL);
 	if (!session)
 		return -ENOMEM;

 	session->ts = trace;
 	INIT_LIST_HEAD(&session->session_list);
-	INIT_LIST_HEAD(&session->stat_list);
 	mutex_init(&session->stat_mutex);
-	session->file = NULL;

 	ret = init_stat_file(session);
 	if (ret) {
@@ -312,7 +360,7 @@ int register_stat_tracer(struct tracer_stat *trace)

 void unregister_stat_tracer(struct tracer_stat *trace)
 {
-	struct tracer_stat_session *node, *tmp;
+	struct stat_session *node, *tmp;

 	mutex_lock(&all_stat_sessions_mutex);
 	list_for_each_entry_safe(node, tmp, &all_stat_sessions, session_list) {
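
A stat source plugs in by filling a struct tracer_stat and calling register_stat_tracer(), which creates one file per tracer under tracing/trace_stat/. A sketch of a minimal registration; the callback names are placeholders and the exact field set is assumed from the ts-> uses in this file:

static struct tracer_stat example_stats = {
	.name		= "example",
	.stat_start	= example_stat_start,	/* first entry */
	.stat_next	= example_stat_next,	/* subsequent entries */
	.stat_cmp	= example_stat_cmp,	/* optional sort order */
	.stat_show	= example_stat_show,	/* print one entry */
	.stat_headers	= example_stat_headers,	/* optional header row */
};

/* in some init path: */
int ret = register_stat_tracer(&example_stats);
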
diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
index 984b9175c13d..97fcea4acce1 100644
--- a/kernel/trace/trace_workqueue.c
+++ b/kernel/trace/trace_workqueue.c
@@ -6,7 +6,7 @@
  */


-#include <trace/workqueue.h>
+#include <trace/events/workqueue.h>
 #include <linux/list.h>
 #include <linux/percpu.h>
 #include "trace_stat.h"
@@ -16,8 +16,6 @@
 /* A cpu workqueue thread */
 struct cpu_workqueue_stats {
 	struct list_head list;
-	/* Useful to know if we print the cpu headers */
-	bool first_entry;
 	int cpu;
 	pid_t pid;
 	/* Can be inserted from interrupt or user context, need to be atomic */
@@ -47,12 +45,11 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
			  struct work_struct *work)
 {
 	int cpu = cpumask_first(&wq_thread->cpus_allowed);
-	struct cpu_workqueue_stats *node, *next;
+	struct cpu_workqueue_stats *node;
 	unsigned long flags;

 	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
-	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
-			list) {
+	list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
 		if (node->pid == wq_thread->pid) {
 			atomic_inc(&node->inserted);
 			goto found;
@@ -69,12 +66,11 @@ probe_workqueue_execution(struct task_struct *wq_thread,
			  struct work_struct *work)
 {
 	int cpu = cpumask_first(&wq_thread->cpus_allowed);
-	struct cpu_workqueue_stats *node, *next;
+	struct cpu_workqueue_stats *node;
 	unsigned long flags;

 	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
-	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
-			list) {
+	list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
 		if (node->pid == wq_thread->pid) {
 			node->executed++;
 			goto found;
@@ -105,8 +101,6 @@ static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
 	cws->pid = wq_thread->pid;

 	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
-	if (list_empty(&workqueue_cpu_stat(cpu)->list))
-		cws->first_entry = true;
 	list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list);
 	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 }
@@ -191,16 +185,9 @@ static void *workqueue_stat_next(void *prev, int idx)
 static int workqueue_stat_show(struct seq_file *s, void *p)
 {
 	struct cpu_workqueue_stats *cws = p;
-	unsigned long flags;
-	int cpu = cws->cpu;
 	struct pid *pid;
 	struct task_struct *tsk;

-	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
-	if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
-		seq_printf(s, "\n");
-	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
-
 	pid = find_get_pid(cws->pid);
 	if (pid) {
 		tsk = get_pid_task(pid, PIDTYPE_PID);
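
This tracer attaches to the hooks with the register_trace_*() functions that the event header generates. A condensed sketch of its init-time registration, assuming the single-argument probe registration of this kernel era (error unwinding abbreviated, the stat struct name assumed):

static int __init example_trace_workqueue_init(void)
{
	int ret;

	ret = register_trace_workqueue_insertion(probe_workqueue_insertion);
	if (ret)
		return ret;
	ret = register_trace_workqueue_execution(probe_workqueue_execution);
	if (ret) {
		unregister_trace_workqueue_insertion(probe_workqueue_insertion);
		return ret;
	}
	/* ... likewise for the creation and destruction probes ... */
	return register_stat_tracer(&workqueue_stats);
}
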
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f71fb2a08950..0668795d8818 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -33,7 +33,8 @@
 #include <linux/kallsyms.h>
 #include <linux/debug_locks.h>
 #include <linux/lockdep.h>
-#include <trace/workqueue.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/workqueue.h>

 /*
  * The per-CPU workqueue (if single thread, we always use the first
@@ -124,8 +125,6 @@ struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
 	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
 }

-DEFINE_TRACE(workqueue_insertion);
-
 static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head)
 {
@@ -262,8 +261,6 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 }
 EXPORT_SYMBOL_GPL(queue_delayed_work_on);

-DEFINE_TRACE(workqueue_execution);
-
 static void run_workqueue(struct cpu_workqueue_struct *cwq)
 {
 	spin_lock_irq(&cwq->lock);
@@ -753,8 +750,6 @@ init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
 	return cwq;
 }

-DEFINE_TRACE(workqueue_creation);
-
 static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 {
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
@@ -860,8 +855,6 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 }
 EXPORT_SYMBOL_GPL(__create_workqueue_key);

-DEFINE_TRACE(workqueue_destruction);
-
 static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 {
 	/*
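
The four DEFINE_TRACE() lines removed above collapse into the single CREATE_TRACE_POINTS block in the first hunk: exactly one translation unit defines that macro before including the event header, which makes <trace/define_trace.h> emit the tracepoint definitions that every other includer only declares. The pattern in isolation:

/* In exactly one .c file (here, kernel/workqueue.c): */
#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

/* Everywhere else, a plain include suffices: */
#include <trace/events/workqueue.h>
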