author	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-21 12:05:47 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-21 12:05:47 -0400
commit	bd4c3a3441144cd46d1f544046523724c5bc6e94 (patch)
tree	8b5c67249a7a163caf3f88cbcb9df5236fcc3b93 /kernel/trace/trace.c
parent	b3727c24da69971503a4ca98b3b877753c6a4393 (diff)
parent	583a22e7c154dc0a3938db522696b4bc7f098f59 (diff)
Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  kernel/profile.c: Switch /proc/irq/prof_cpu_mask to seq_file
  tracing: Export trace_profile_buf symbols
  tracing/events: use list_for_entry_continue
  tracing: remove max_tracer_type_len
  function-graph: use ftrace_graph_funcs directly
  tracing: Remove markers
  tracing: Allocate the ftrace event profile buffer dynamically
  tracing: Factorize the events profile accounting
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c	49
1 file changed, 16 insertions, 33 deletions
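
Of the commits listed above, "tracing: remove max_tracer_type_len" is the one that touches this file: tracer-name buffers are now sized by the fixed MAX_TRACER_SIZE constant instead of a maximum length tracked at registration time. The snippet below is a minimal userspace sketch of that bounded-copy pattern only; set_tracer_name() and the plain memcpy() are illustrative stand-ins for the kernel's tracing_set_trace_write()/copy_from_user() path, not kernel API.

/*
 * Minimal userspace sketch of the fixed-bound pattern the patch adopts.
 * MAX_TRACER_SIZE mirrors the limit introduced in trace.c; everything
 * else here is hypothetical scaffolding for illustration.
 */
#include <stdio.h>
#include <string.h>

#define MAX_TRACER_SIZE 100

/* Stand-in for tracing_set_trace_write(): clamp, copy, NUL-terminate. */
static int set_tracer_name(const char *ubuf, size_t cnt)
{
	char buf[MAX_TRACER_SIZE + 1];	/* fixed size, no runtime maximum needed */

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	memcpy(buf, ubuf, cnt);		/* copy_from_user() in the kernel */
	buf[cnt] = '\0';

	printf("selecting tracer '%s'\n", buf);
	return 0;
}

int main(void)
{
	const char *name = "function_graph";

	return set_tracer_name(name, strlen(name));
}
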
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index fd52a19dd172..861308072d28 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -125,13 +125,13 @@ int ftrace_dump_on_oops;
 
 static int tracing_set_tracer(const char *buf);
 
-#define BOOTUP_TRACER_SIZE		100
-static char bootup_tracer_buf[BOOTUP_TRACER_SIZE] __initdata;
+#define MAX_TRACER_SIZE		100
+static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
 static char *default_bootup_tracer;
 
 static int __init set_ftrace(char *str)
 {
-	strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
+	strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
 	default_bootup_tracer = bootup_tracer_buf;
 	/* We are using ftrace early, expand it */
 	ring_buffer_expanded = 1;
@@ -242,13 +242,6 @@ static struct tracer *trace_types __read_mostly;
 static struct tracer		*current_trace __read_mostly;
 
 /*
- * max_tracer_type_len is used to simplify the allocating of
- * buffers to read userspace tracer names. We keep track of
- * the longest tracer name registered.
- */
-static int			max_tracer_type_len;
-
-/*
  * trace_types_lock is used to protect the trace_types list.
  * This lock is also used to keep user access serialized.
  * Accesses from userspace will grab this lock while userspace
@@ -619,7 +612,6 @@ __releases(kernel_lock)
 __acquires(kernel_lock)
 {
 	struct tracer *t;
-	int len;
 	int ret = 0;
 
 	if (!type->name) {
@@ -627,6 +619,11 @@ __acquires(kernel_lock)
 		return -1;
 	}
 
+	if (strlen(type->name) > MAX_TRACER_SIZE) {
+		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
+		return -1;
+	}
+
 	/*
 	 * When this gets called we hold the BKL which means that
 	 * preemption is disabled. Various trace selftests however
@@ -641,7 +638,7 @@ __acquires(kernel_lock)
 	for (t = trace_types; t; t = t->next) {
 		if (strcmp(type->name, t->name) == 0) {
 			/* already found */
-			pr_info("Trace %s already registered\n",
+			pr_info("Tracer %s already registered\n",
 				type->name);
 			ret = -1;
 			goto out;
@@ -692,9 +689,6 @@ __acquires(kernel_lock)
 
 	type->next = trace_types;
 	trace_types = type;
-	len = strlen(type->name);
-	if (len > max_tracer_type_len)
-		max_tracer_type_len = len;
 
  out:
 	tracing_selftest_running = false;
@@ -703,7 +697,7 @@ __acquires(kernel_lock)
 	if (ret || !default_bootup_tracer)
 		goto out_unlock;
 
-	if (strncmp(default_bootup_tracer, type->name, BOOTUP_TRACER_SIZE))
+	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
 		goto out_unlock;
 
 	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
@@ -725,14 +719,13 @@ __acquires(kernel_lock)
 void unregister_tracer(struct tracer *type)
 {
 	struct tracer **t;
-	int len;
 
 	mutex_lock(&trace_types_lock);
 	for (t = &trace_types; *t; t = &(*t)->next) {
 		if (*t == type)
 			goto found;
 	}
-	pr_info("Trace %s not registered\n", type->name);
+	pr_info("Tracer %s not registered\n", type->name);
 	goto out;
 
  found:
@@ -745,17 +738,7 @@ void unregister_tracer(struct tracer *type)
 		current_trace->stop(&global_trace);
 		current_trace = &nop_trace;
 	}
-
-	if (strlen(type->name) != max_tracer_type_len)
-		goto out;
-
-	max_tracer_type_len = 0;
-	for (t = &trace_types; *t; t = &(*t)->next) {
-		len = strlen((*t)->name);
-		if (len > max_tracer_type_len)
-			max_tracer_type_len = len;
-	}
- out:
+out:
 	mutex_unlock(&trace_types_lock);
 }
 
@@ -2604,7 +2587,7 @@ static ssize_t
 tracing_set_trace_read(struct file *filp, char __user *ubuf,
 		       size_t cnt, loff_t *ppos)
 {
-	char buf[max_tracer_type_len+2];
+	char buf[MAX_TRACER_SIZE+2];
 	int r;
 
 	mutex_lock(&trace_types_lock);
@@ -2754,15 +2737,15 @@ static ssize_t
 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
 			size_t cnt, loff_t *ppos)
 {
-	char buf[max_tracer_type_len+1];
+	char buf[MAX_TRACER_SIZE+1];
 	int i;
 	size_t ret;
 	int err;
 
 	ret = cnt;
 
-	if (cnt > max_tracer_type_len)
-		cnt = max_tracer_type_len;
+	if (cnt > MAX_TRACER_SIZE)
+		cnt = MAX_TRACER_SIZE;
 
 	if (copy_from_user(&buf, ubuf, cnt))
 		return -EFAULT;