author     Ingo Molnar <mingo@elte.hu>  2009-10-01 05:20:33 -0400
committer  Ingo Molnar <mingo@elte.hu>  2009-10-01 05:20:48 -0400
commit     0aa73ba1c4e1ad1d51a29e0df95ccd9f746918b6 (patch)
tree       f0714ddcd02812b4fbe3b5405df9e4068f5587e2 /kernel/trace/trace_events.c
parent     925936ebf35a95c290e010b784c962164e6728f3 (diff)
parent     33974093c024f08caadd2fc71a83bd811ed1831d (diff)
Merge branch 'tracing/urgent' into tracing/core
Merge reason: Pick up latest fixes and update to latest upstream.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/trace_events.c')
-rw-r--r--  kernel/trace/trace_events.c  56
1 file changed, 18 insertions(+), 38 deletions(-)
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 8c91b7c8f047..5e9ffc33f6db 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -232,10 +232,9 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
 		   size_t cnt, loff_t *ppos)
 {
 	struct trace_parser parser;
-	size_t read = 0;
-	ssize_t ret;
+	ssize_t read, ret;
 
-	if (!cnt || cnt < 0)
+	if (!cnt)
 		return 0;
 
 	ret = tracing_update_buffers();
@@ -247,7 +246,7 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
 
 	read = trace_get_user(&parser, ubuf, cnt, ppos);
 
-	if (trace_parser_loaded((&parser))) {
+	if (read >= 0 && trace_parser_loaded((&parser))) {
 		int set = 1;
 
 		if (*parser.buffer == '!')
@@ -271,42 +270,32 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
 static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	struct list_head *list = m->private;
-	struct ftrace_event_call *call;
+	struct ftrace_event_call *call = v;
 
 	(*pos)++;
 
-	for (;;) {
-		if (list == &ftrace_events)
-			return NULL;
-
-		call = list_entry(list, struct ftrace_event_call, list);
-
+	list_for_each_entry_continue(call, &ftrace_events, list) {
 		/*
 		 * The ftrace subsystem is for showing formats only.
 		 * They can not be enabled or disabled via the event files.
 		 */
 		if (call->regfunc)
-			break;
-
-		list = list->next;
+			return call;
 	}
 
-	m->private = list->next;
-
-	return call;
+	return NULL;
 }
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
-	struct ftrace_event_call *call = NULL;
+	struct ftrace_event_call *call;
 	loff_t l;
 
 	mutex_lock(&event_mutex);
 
-	m->private = ftrace_events.next;
+	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
 	for (l = 0; l <= *pos; ) {
-		call = t_next(m, NULL, &l);
+		call = t_next(m, call, &l);
 		if (!call)
 			break;
 	}
@@ -316,37 +305,28 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 static void *
 s_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	struct list_head *list = m->private;
-	struct ftrace_event_call *call;
+	struct ftrace_event_call *call = v;
 
 	(*pos)++;
 
- retry:
-	if (list == &ftrace_events)
-		return NULL;
-
-	call = list_entry(list, struct ftrace_event_call, list);
-
-	if (!call->enabled) {
-		list = list->next;
-		goto retry;
+	list_for_each_entry_continue(call, &ftrace_events, list) {
+		if (call->enabled)
+			return call;
 	}
 
-	m->private = list->next;
-
-	return call;
+	return NULL;
 }
 
 static void *s_start(struct seq_file *m, loff_t *pos)
 {
-	struct ftrace_event_call *call = NULL;
+	struct ftrace_event_call *call;
 	loff_t l;
 
 	mutex_lock(&event_mutex);
 
-	m->private = ftrace_events.next;
+	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
 	for (l = 0; l <= *pos; ) {
-		call = s_next(m, NULL, &l);
+		call = s_next(m, call, &l);
 		if (!call)
 			break;
 	}
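For reference, the reworked iterators above rely on one trick: t_start()/s_start() seed the seq_file cursor with the list head itself (list_entry() on &ftrace_events), so the first call to t_next()/s_next() can use list_for_each_entry_continue() to step straight to the first real entry. Below is a minimal userspace sketch of that pattern, not taken from the commit: the list helpers are cut-down re-implementations of the kernel's <linux/list.h> macros, and names such as struct demo_event are hypothetical.

/*
 * Userspace illustration of the "continue from the list head" iterator
 * pattern used by the new t_start()/t_next().  Compile with gcc (uses
 * the typeof extension, as the kernel macros do).
 */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

/* Continue iterating after the entry currently held in "pos". */
#define list_for_each_entry_continue(pos, head, member)			\
	for (pos = list_entry(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = list_entry(pos->member.next, typeof(*pos), member))

struct demo_event {
	const char	*name;
	int		enabled;
	struct list_head list;
};

int main(void)
{
	struct demo_event a = { "sched_switch", 1 };
	struct demo_event b = { "irq_handler", 0 };
	struct list_head head = { &a.list, &b.list };	/* head -> a -> b -> head */

	a.list.next = &b.list;  a.list.prev = &head;
	b.list.next = &head;    b.list.prev = &a.list;

	/*
	 * Like the reworked s_start(): pretend the head is an entry, so the
	 * first "continue" step lands on the first real element.  Only the
	 * embedded list field of this bogus pointer is ever dereferenced.
	 */
	struct demo_event *pos = list_entry(&head, struct demo_event, list);

	list_for_each_entry_continue(pos, &head, list)
		if (pos->enabled)
			printf("%s\n", pos->name);	/* prints "sched_switch" */

	return 0;
}

The same seeded cursor is also what lets t_start()/s_start() replay *pos steps by repeatedly calling the ->next iterator, instead of keeping a separate cursor in m->private as the old code did.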