Diffstat (limited to 'kernel/trace/trace.c')

 kernel/trace/trace.c | 70 ++++++++++++++++++++++++++++------------------------------------------
 1 file changed, 28 insertions(+), 42 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index fd52a19dd172..45068269ebb1 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -125,13 +125,13 @@ int ftrace_dump_on_oops;
 
 static int tracing_set_tracer(const char *buf);
 
-#define BOOTUP_TRACER_SIZE 100
-static char bootup_tracer_buf[BOOTUP_TRACER_SIZE] __initdata;
+#define MAX_TRACER_SIZE 100
+static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
 static char *default_bootup_tracer;
 
 static int __init set_ftrace(char *str)
 {
-        strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
+        strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
         default_bootup_tracer = bootup_tracer_buf;
         /* We are using ftrace early, expand it */
         ring_buffer_expanded = 1;
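A note on the strncpy() above: strncpy() does not NUL-terminate the destination when the source is at least as long as the limit, so a boot argument of MAX_TRACER_SIZE bytes or more would leave bootup_tracer_buf unterminated (the static buffer starts zeroed, so shorter strings are fine). A defensive variant, sketched here with a hypothetical helper rather than taken from this patch, reserves the final byte:

    /* Copy at most MAX_TRACER_SIZE - 1 bytes, then terminate explicitly. */
    static void set_bootup_tracer(const char *str)
    {
            strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE - 1);
            bootup_tracer_buf[MAX_TRACER_SIZE - 1] = '\0';
    }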
@@ -242,13 +242,6 @@ static struct tracer *trace_types __read_mostly;
 static struct tracer *current_trace __read_mostly;
 
 /*
- * max_tracer_type_len is used to simplify the allocating of
- * buffers to read userspace tracer names. We keep track of
- * the longest tracer name registered.
- */
-static int max_tracer_type_len;
-
-/*
  * trace_types_lock is used to protect the trace_types list.
  * This lock is also used to keep user access serialized.
  * Accesses from userspace will grab this lock while userspace
@@ -275,12 +268,18 @@ static DEFINE_SPINLOCK(tracing_start_lock);
  */
 void trace_wake_up(void)
 {
+        int cpu;
+
+        if (trace_flags & TRACE_ITER_BLOCK)
+                return;
         /*
          * The runqueue_is_locked() can fail, but this is the best we
          * have for now:
          */
-        if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
+        cpu = get_cpu();
+        if (!runqueue_is_locked(cpu))
                 wake_up(&trace_wait);
+        put_cpu();
 }
 
 static int __init set_buf_size(char *str)
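The rewrite hoists the TRACE_ITER_BLOCK test into an early return and pins the task to a CPU before checking that CPU's runqueue, since runqueue_is_locked() now takes a cpu argument. The pinning idiom in isolation (a sketch; per_cpu_work() is a hypothetical stand-in for whatever needs a stable CPU id):

    int cpu;

    cpu = get_cpu();        /* disables preemption, returns current CPU id */
    per_cpu_work(cpu);      /* the task cannot migrate while preemption is off */
    put_cpu();              /* re-enables preemption */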
@@ -416,7 +415,7 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
 
         /* read the non-space input */
         while (cnt && !isspace(ch)) {
-                if (parser->idx < parser->size)
+                if (parser->idx < parser->size - 1)
                         parser->buffer[parser->idx++] = ch;
                 else {
                         ret = -EINVAL;
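The new bound is an off-by-one fix: the parser NUL-terminates the token after this loop, so the last slot of the buffer has to stay free for it. With the old `parser->idx < parser->size` test, a token of exactly `size` characters was accepted and the terminator written one byte past the buffer. The layout for a hypothetical size of 4:

    /*
     * idx:      0    1    2    3
     * buffer: ['f', 'o', 'o', '\0']   <- slot 3 reserved for the terminator
     */
    if (parser->idx < parser->size - 1)
            parser->buffer[parser->idx++] = ch;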
@@ -619,7 +618,6 @@ __releases(kernel_lock)
 __acquires(kernel_lock)
 {
         struct tracer *t;
-        int len;
         int ret = 0;
 
         if (!type->name) {
@@ -627,6 +625,11 @@ __acquires(kernel_lock)
                 return -1;
         }
 
+        if (strlen(type->name) > MAX_TRACER_SIZE) {
+                pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
+                return -1;
+        }
+
         /*
          * When this gets called we hold the BKL which means that
          * preemption is disabled. Various trace selftests however
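Capping names at registration time means every later consumer can size buffers from the same constant: the strncmp() against the boot-time buffer below, and the fixed-size stack buffers in the current_tracer read/write handlers further down. For reference, a minimal registration looks roughly like this (a sketch; "mytracer" and the init hook are hypothetical, and a real tracer fills in callbacks beyond .name):

    static struct tracer my_tracer __read_mostly = {
            .name   = "mytracer",   /* rejected above if longer than MAX_TRACER_SIZE */
    };

    static int __init my_tracer_init(void)
    {
            return register_tracer(&my_tracer);
    }
    device_initcall(my_tracer_init);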
@@ -641,7 +644,7 @@ __acquires(kernel_lock)
         for (t = trace_types; t; t = t->next) {
                 if (strcmp(type->name, t->name) == 0) {
                         /* already found */
-                        pr_info("Trace %s already registered\n",
+                        pr_info("Tracer %s already registered\n",
                                 type->name);
                         ret = -1;
                         goto out;
@@ -692,9 +695,6 @@ __acquires(kernel_lock)
 
         type->next = trace_types;
         trace_types = type;
-        len = strlen(type->name);
-        if (len > max_tracer_type_len)
-                max_tracer_type_len = len;
 
  out:
         tracing_selftest_running = false;
@@ -703,7 +703,7 @@ __acquires(kernel_lock)
         if (ret || !default_bootup_tracer)
                 goto out_unlock;
 
-        if (strncmp(default_bootup_tracer, type->name, BOOTUP_TRACER_SIZE))
+        if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
                 goto out_unlock;
 
         printk(KERN_INFO "Starting tracer '%s'\n", type->name);
@@ -725,14 +725,13 @@ __acquires(kernel_lock)
 void unregister_tracer(struct tracer *type)
 {
         struct tracer **t;
-        int len;
 
         mutex_lock(&trace_types_lock);
         for (t = &trace_types; *t; t = &(*t)->next) {
                 if (*t == type)
                         goto found;
         }
-        pr_info("Trace %s not registered\n", type->name);
+        pr_info("Tracer %s not registered\n", type->name);
         goto out;
 
  found:
@@ -745,17 +744,7 @@ void unregister_tracer(struct tracer *type)
                 current_trace->stop(&global_trace);
                 current_trace = &nop_trace;
         }
-
-        if (strlen(type->name) != max_tracer_type_len)
-                goto out;
-
-        max_tracer_type_len = 0;
-        for (t = &trace_types; *t; t = &(*t)->next) {
-                len = strlen((*t)->name);
-                if (len > max_tracer_type_len)
-                        max_tracer_type_len = len;
-        }
- out:
+out:
         mutex_unlock(&trace_types_lock);
 }
 
@@ -1960,7 +1949,7 @@ static int s_show(struct seq_file *m, void *v)
         return 0;
 }
 
-static struct seq_operations tracer_seq_ops = {
+static const struct seq_operations tracer_seq_ops = {
         .start = s_start,
         .next = s_next,
         .stop = s_stop,
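Constifying the seq_operations tables lets them be placed in read-only memory; nothing writes to them after build time. This is safe because the seq_file API already takes the table by const pointer, so callers need no change. A sketch with a hypothetical open handler:

    /* seq_open() is declared as
     *   int seq_open(struct file *, const struct seq_operations *);
     * so a const table is accepted as-is.
     */
    static int example_open(struct inode *inode, struct file *file)
    {
            return seq_open(file, &tracer_seq_ops);
    }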
@@ -1995,11 +1984,9 @@ __tracing_open(struct inode *inode, struct file *file)
         if (current_trace)
                 *iter->trace = *current_trace;
 
-        if (!alloc_cpumask_var(&iter->started, GFP_KERNEL))
+        if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
                 goto fail;
 
-        cpumask_clear(iter->started);
-
         if (current_trace && current_trace->print_max)
                 iter->tr = &max_tr;
         else
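zalloc_cpumask_var() is the allocate-and-zero variant of alloc_cpumask_var(), which is why the separate cpumask_clear() call disappears. The two forms side by side (a sketch with a hypothetical mask, not the file's code):

    cpumask_var_t mask;

    /* before: allocate, then clear in a second step */
    if (!alloc_cpumask_var(&mask, GFP_KERNEL))
            return -ENOMEM;
    cpumask_clear(mask);

    /* after: one call, mask comes back zeroed */
    if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
            return -ENOMEM;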
@@ -2174,7 +2161,7 @@ static int t_show(struct seq_file *m, void *v)
         return 0;
 }
 
-static struct seq_operations show_traces_seq_ops = {
+static const struct seq_operations show_traces_seq_ops = {
         .start = t_start,
         .next = t_next,
         .stop = t_stop,
@@ -2604,7 +2591,7 @@ static ssize_t
 tracing_set_trace_read(struct file *filp, char __user *ubuf,
                        size_t cnt, loff_t *ppos)
 {
-        char buf[max_tracer_type_len+2];
+        char buf[MAX_TRACER_SIZE+2];
         int r;
 
         mutex_lock(&trace_types_lock);
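The `+2` here leaves room for a trailing newline plus the NUL terminator; the read handler formats the current tracer's name followed by '\n' before copying it out, along the lines of this sketch:

    char buf[MAX_TRACER_SIZE + 2];  /* name + '\n' + '\0' */
    int r;

    r = sprintf(buf, "%s\n", current_trace->name);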
@@ -2754,15 +2741,15 @@ static ssize_t
 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
                         size_t cnt, loff_t *ppos)
 {
-        char buf[max_tracer_type_len+1];
+        char buf[MAX_TRACER_SIZE+1];
         int i;
         size_t ret;
         int err;
 
         ret = cnt;
 
-        if (cnt > max_tracer_type_len)
-                cnt = max_tracer_type_len;
+        if (cnt > MAX_TRACER_SIZE)
+                cnt = MAX_TRACER_SIZE;
 
         if (copy_from_user(&buf, ubuf, cnt))
                 return -EFAULT;
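The write side needs only `+1`: the copy is capped at MAX_TRACER_SIZE, so NUL-terminating at buf[cnt] always lands inside the MAX_TRACER_SIZE + 1 buffer. The pattern distilled from the lines above (a sketch, not new behavior):

    char buf[MAX_TRACER_SIZE + 1];

    if (cnt > MAX_TRACER_SIZE)
            cnt = MAX_TRACER_SIZE;
    if (copy_from_user(buf, ubuf, cnt))
            return -EFAULT;
    buf[cnt] = '\0';        /* in bounds: cnt <= MAX_TRACER_SIZE */

From userspace this is the path exercised by writing a tracer name to the current_tracer file in the tracing debugfs directory.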
@@ -4400,7 +4387,7 @@ __init static int tracer_alloc_buffers(void)
         if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
                 goto out_free_buffer_mask;
 
-        if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
+        if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
                 goto out_free_tracing_cpumask;
 
         /* To save memory, keep the ring buffer size to its minimum */
@@ -4411,7 +4398,6 @@ __init static int tracer_alloc_buffers(void)
 
         cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
         cpumask_copy(tracing_cpumask, cpu_all_mask);
-        cpumask_clear(tracing_reader_cpumask);
 
         /* TODO: make the number of buffers hot pluggable with CPUS */
         global_trace.buffer = ring_buffer_alloc(ring_buf_size,