about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
author Ingo Molnar <mingo@elte.hu> 2009-09-19 13:21:15 -0400
committer Ingo Molnar <mingo@elte.hu> 2009-09-19 13:21:15 -0400
commit 2df28818047f7cbd8ece9c965a0e8a423c80d511 (patch)
tree 9de72c5e53ad8a27c7b901fb0490b79900fa6840 /kernel
parent be4bdbfbae6b303c21ebe446648f617908a794b5 (diff)
parent 30bd39cd6244ffe3258c9203405286ef77b1c4eb (diff)
Merge branch 'tip/tracing/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/urgent
Diffstat (limited to 'kernel')
-rw-r--r-- kernel/trace/ftrace.c       23
-rw-r--r-- kernel/trace/trace.c        49
-rw-r--r-- kernel/trace/trace_events.c 49
3 files changed, 35 insertions(+), 86 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index cc615f84751b..c71e91bf7372 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2414,11 +2414,9 @@ unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
2414static void * 2414static void *
2415__g_next(struct seq_file *m, loff_t *pos) 2415__g_next(struct seq_file *m, loff_t *pos)
2416{ 2416{
2417 unsigned long *array = m->private;
2418
2419 if (*pos >= ftrace_graph_count) 2417 if (*pos >= ftrace_graph_count)
2420 return NULL; 2418 return NULL;
2421 return &array[*pos]; 2419 return &ftrace_graph_funcs[*pos];
2422} 2420}
2423 2421
2424static void * 2422static void *
@@ -2482,16 +2480,10 @@ ftrace_graph_open(struct inode *inode, struct file *file)
2482 ftrace_graph_count = 0; 2480 ftrace_graph_count = 0;
2483 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs)); 2481 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
2484 } 2482 }
2483 mutex_unlock(&graph_lock);
2485 2484
2486 if (file->f_mode & FMODE_READ) { 2485 if (file->f_mode & FMODE_READ)
2487 ret = seq_open(file, &ftrace_graph_seq_ops); 2486 ret = seq_open(file, &ftrace_graph_seq_ops);
2488 if (!ret) {
2489 struct seq_file *m = file->private_data;
2490 m->private = ftrace_graph_funcs;
2491 }
2492 } else
2493 file->private_data = ftrace_graph_funcs;
2494 mutex_unlock(&graph_lock);
2495 2487
2496 return ret; 2488 return ret;
2497} 2489}
@@ -2560,7 +2552,6 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
2560 size_t cnt, loff_t *ppos) 2552 size_t cnt, loff_t *ppos)
2561{ 2553{
2562 struct trace_parser parser; 2554 struct trace_parser parser;
2563 unsigned long *array;
2564 size_t read = 0; 2555 size_t read = 0;
2565 ssize_t ret; 2556 ssize_t ret;
2566 2557
@@ -2574,12 +2565,6 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
2574 goto out; 2565 goto out;
2575 } 2566 }
2576 2567
2577 if (file->f_mode & FMODE_READ) {
2578 struct seq_file *m = file->private_data;
2579 array = m->private;
2580 } else
2581 array = file->private_data;
2582
2583 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) { 2568 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
2584 ret = -ENOMEM; 2569 ret = -ENOMEM;
2585 goto out; 2570 goto out;
@@ -2591,7 +2576,7 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
2591 parser.buffer[parser.idx] = 0; 2576 parser.buffer[parser.idx] = 0;
2592 2577
2593 /* we allow only one expression at a time */ 2578 /* we allow only one expression at a time */
2594 ret = ftrace_set_func(array, &ftrace_graph_count, 2579 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
2595 parser.buffer); 2580 parser.buffer);
2596 if (ret) 2581 if (ret)
2597 goto out; 2582 goto out;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index fd52a19dd172..861308072d28 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -125,13 +125,13 @@ int ftrace_dump_on_oops;
125 125
126static int tracing_set_tracer(const char *buf); 126static int tracing_set_tracer(const char *buf);
127 127
128#define BOOTUP_TRACER_SIZE 100 128#define MAX_TRACER_SIZE 100
129static char bootup_tracer_buf[BOOTUP_TRACER_SIZE] __initdata; 129static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
130static char *default_bootup_tracer; 130static char *default_bootup_tracer;
131 131
132static int __init set_ftrace(char *str) 132static int __init set_ftrace(char *str)
133{ 133{
134 strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE); 134 strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
135 default_bootup_tracer = bootup_tracer_buf; 135 default_bootup_tracer = bootup_tracer_buf;
136 /* We are using ftrace early, expand it */ 136 /* We are using ftrace early, expand it */
137 ring_buffer_expanded = 1; 137 ring_buffer_expanded = 1;
@@ -242,13 +242,6 @@ static struct tracer *trace_types __read_mostly;
242static struct tracer *current_trace __read_mostly; 242static struct tracer *current_trace __read_mostly;
243 243
244/* 244/*
245 * max_tracer_type_len is used to simplify the allocating of
246 * buffers to read userspace tracer names. We keep track of
247 * the longest tracer name registered.
248 */
249static int max_tracer_type_len;
250
251/*
252 * trace_types_lock is used to protect the trace_types list. 245 * trace_types_lock is used to protect the trace_types list.
253 * This lock is also used to keep user access serialized. 246 * This lock is also used to keep user access serialized.
254 * Accesses from userspace will grab this lock while userspace 247 * Accesses from userspace will grab this lock while userspace
@@ -619,7 +612,6 @@ __releases(kernel_lock)
619__acquires(kernel_lock) 612__acquires(kernel_lock)
620{ 613{
621 struct tracer *t; 614 struct tracer *t;
622 int len;
623 int ret = 0; 615 int ret = 0;
624 616
625 if (!type->name) { 617 if (!type->name) {
@@ -627,6 +619,11 @@ __acquires(kernel_lock)
627 return -1; 619 return -1;
628 } 620 }
629 621
622 if (strlen(type->name) > MAX_TRACER_SIZE) {
623 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
624 return -1;
625 }
626
630 /* 627 /*
631 * When this gets called we hold the BKL which means that 628 * When this gets called we hold the BKL which means that
632 * preemption is disabled. Various trace selftests however 629 * preemption is disabled. Various trace selftests however
@@ -641,7 +638,7 @@ __acquires(kernel_lock)
641 for (t = trace_types; t; t = t->next) { 638 for (t = trace_types; t; t = t->next) {
642 if (strcmp(type->name, t->name) == 0) { 639 if (strcmp(type->name, t->name) == 0) {
643 /* already found */ 640 /* already found */
644 pr_info("Trace %s already registered\n", 641 pr_info("Tracer %s already registered\n",
645 type->name); 642 type->name);
646 ret = -1; 643 ret = -1;
647 goto out; 644 goto out;
@@ -692,9 +689,6 @@ __acquires(kernel_lock)
692 689
693 type->next = trace_types; 690 type->next = trace_types;
694 trace_types = type; 691 trace_types = type;
695 len = strlen(type->name);
696 if (len > max_tracer_type_len)
697 max_tracer_type_len = len;
698 692
699 out: 693 out:
700 tracing_selftest_running = false; 694 tracing_selftest_running = false;
@@ -703,7 +697,7 @@ __acquires(kernel_lock)
703 if (ret || !default_bootup_tracer) 697 if (ret || !default_bootup_tracer)
704 goto out_unlock; 698 goto out_unlock;
705 699
706 if (strncmp(default_bootup_tracer, type->name, BOOTUP_TRACER_SIZE)) 700 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
707 goto out_unlock; 701 goto out_unlock;
708 702
709 printk(KERN_INFO "Starting tracer '%s'\n", type->name); 703 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
@@ -725,14 +719,13 @@ __acquires(kernel_lock)
725void unregister_tracer(struct tracer *type) 719void unregister_tracer(struct tracer *type)
726{ 720{
727 struct tracer **t; 721 struct tracer **t;
728 int len;
729 722
730 mutex_lock(&trace_types_lock); 723 mutex_lock(&trace_types_lock);
731 for (t = &trace_types; *t; t = &(*t)->next) { 724 for (t = &trace_types; *t; t = &(*t)->next) {
732 if (*t == type) 725 if (*t == type)
733 goto found; 726 goto found;
734 } 727 }
735 pr_info("Trace %s not registered\n", type->name); 728 pr_info("Tracer %s not registered\n", type->name);
736 goto out; 729 goto out;
737 730
738 found: 731 found:
@@ -745,17 +738,7 @@ void unregister_tracer(struct tracer *type)
745 current_trace->stop(&global_trace); 738 current_trace->stop(&global_trace);
746 current_trace = &nop_trace; 739 current_trace = &nop_trace;
747 } 740 }
748 741out:
749 if (strlen(type->name) != max_tracer_type_len)
750 goto out;
751
752 max_tracer_type_len = 0;
753 for (t = &trace_types; *t; t = &(*t)->next) {
754 len = strlen((*t)->name);
755 if (len > max_tracer_type_len)
756 max_tracer_type_len = len;
757 }
758 out:
759 mutex_unlock(&trace_types_lock); 742 mutex_unlock(&trace_types_lock);
760} 743}
761 744
@@ -2604,7 +2587,7 @@ static ssize_t
2604tracing_set_trace_read(struct file *filp, char __user *ubuf, 2587tracing_set_trace_read(struct file *filp, char __user *ubuf,
2605 size_t cnt, loff_t *ppos) 2588 size_t cnt, loff_t *ppos)
2606{ 2589{
2607 char buf[max_tracer_type_len+2]; 2590 char buf[MAX_TRACER_SIZE+2];
2608 int r; 2591 int r;
2609 2592
2610 mutex_lock(&trace_types_lock); 2593 mutex_lock(&trace_types_lock);
@@ -2754,15 +2737,15 @@ static ssize_t
2754tracing_set_trace_write(struct file *filp, const char __user *ubuf, 2737tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2755 size_t cnt, loff_t *ppos) 2738 size_t cnt, loff_t *ppos)
2756{ 2739{
2757 char buf[max_tracer_type_len+1]; 2740 char buf[MAX_TRACER_SIZE+1];
2758 int i; 2741 int i;
2759 size_t ret; 2742 size_t ret;
2760 int err; 2743 int err;
2761 2744
2762 ret = cnt; 2745 ret = cnt;
2763 2746
2764 if (cnt > max_tracer_type_len) 2747 if (cnt > MAX_TRACER_SIZE)
2765 cnt = max_tracer_type_len; 2748 cnt = MAX_TRACER_SIZE;
2766 2749
2767 if (copy_from_user(&buf, ubuf, cnt)) 2750 if (copy_from_user(&buf, ubuf, cnt))
2768 return -EFAULT; 2751 return -EFAULT;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 56c260b83a9c..6f03c8a1105e 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -271,42 +271,32 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
271static void * 271static void *
272t_next(struct seq_file *m, void *v, loff_t *pos) 272t_next(struct seq_file *m, void *v, loff_t *pos)
273{ 273{
274 struct list_head *list = m->private; 274 struct ftrace_event_call *call = v;
275 struct ftrace_event_call *call;
276 275
277 (*pos)++; 276 (*pos)++;
278 277
279 for (;;) { 278 list_for_each_entry_continue(call, &ftrace_events, list) {
280 if (list == &ftrace_events)
281 return NULL;
282
283 call = list_entry(list, struct ftrace_event_call, list);
284
285 /* 279 /*
286 * The ftrace subsystem is for showing formats only. 280 * The ftrace subsystem is for showing formats only.
287 * They can not be enabled or disabled via the event files. 281 * They can not be enabled or disabled via the event files.
288 */ 282 */
289 if (call->regfunc) 283 if (call->regfunc)
290 break; 284 return call;
291
292 list = list->next;
293 } 285 }
294 286
295 m->private = list->next; 287 return NULL;
296
297 return call;
298} 288}
299 289
300static void *t_start(struct seq_file *m, loff_t *pos) 290static void *t_start(struct seq_file *m, loff_t *pos)
301{ 291{
302 struct ftrace_event_call *call = NULL; 292 struct ftrace_event_call *call;
303 loff_t l; 293 loff_t l;
304 294
305 mutex_lock(&event_mutex); 295 mutex_lock(&event_mutex);
306 296
307 m->private = ftrace_events.next; 297 call = list_entry(&ftrace_events, struct ftrace_event_call, list);
308 for (l = 0; l <= *pos; ) { 298 for (l = 0; l <= *pos; ) {
309 call = t_next(m, NULL, &l); 299 call = t_next(m, call, &l);
310 if (!call) 300 if (!call)
311 break; 301 break;
312 } 302 }
@@ -316,37 +306,28 @@ static void *t_start(struct seq_file *m, loff_t *pos)
316static void * 306static void *
317s_next(struct seq_file *m, void *v, loff_t *pos) 307s_next(struct seq_file *m, void *v, loff_t *pos)
318{ 308{
319 struct list_head *list = m->private; 309 struct ftrace_event_call *call = v;
320 struct ftrace_event_call *call;
321 310
322 (*pos)++; 311 (*pos)++;
323 312
324 retry: 313 list_for_each_entry_continue(call, &ftrace_events, list) {
325 if (list == &ftrace_events) 314 if (call->enabled)
326 return NULL; 315 return call;
327
328 call = list_entry(list, struct ftrace_event_call, list);
329
330 if (!call->enabled) {
331 list = list->next;
332 goto retry;
333 } 316 }
334 317
335 m->private = list->next; 318 return NULL;
336
337 return call;
338} 319}
339 320
340static void *s_start(struct seq_file *m, loff_t *pos) 321static void *s_start(struct seq_file *m, loff_t *pos)
341{ 322{
342 struct ftrace_event_call *call = NULL; 323 struct ftrace_event_call *call;
343 loff_t l; 324 loff_t l;
344 325
345 mutex_lock(&event_mutex); 326 mutex_lock(&event_mutex);
346 327
347 m->private = ftrace_events.next; 328 call = list_entry(&ftrace_events, struct ftrace_event_call, list);
348 for (l = 0; l <= *pos; ) { 329 for (l = 0; l <= *pos; ) {
349 call = s_next(m, NULL, &l); 330 call = s_next(m, call, &l);
350 if (!call) 331 if (!call)
351 break; 332 break;
352 } 333 }