author     Steven Rostedt <srostedt@redhat.com>    2012-05-11 13:29:49 -0400
committer  Steven Rostedt <rostedt@goodmis.org>    2013-03-15 00:34:42 -0400
commit     2b6080f28c7cc3efc8625ab71495aae89aeb63a0
tree       f3fe3b8a7ce99dda0da01f097255cae596083c88 /kernel
parent     ae3b5093ad6004b52e2825f3db1ad8200a2724d8
tracing: Encapsulate global_trace and remove dependencies on global vars
The global_trace variable in kernel/trace/trace.c has been kept 'static' and
local to that file so that it is not used much outside of it. This has paid
off, even though a lot of changes were needed to make the trace_array
structure more generic (not dependent on global_trace). Most of the direct
uses of global_trace now need to be removed so that more trace_arrays can be
created, which is what will let us add multiple buffers.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
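The shape of the change: bookkeeping that used to live in file-scope globals in trace.c now hangs off each trace_array instance. Below is a condensed sketch of the per-instance fields involved, inferred from the usages in this patch; the real struct trace_array in kernel/trace/trace.h carries more members, and the exact layout shown here is illustrative only.

	/* Sketch only -- field set condensed from this patch's usages. */
	struct trace_array {
		struct ring_buffer	*buffer;
		int			stop_count;	/* was: static int trace_stop_count */
		raw_spinlock_t		start_lock;	/* was: tracing_start_lock */
		struct tracer		*current_trace;	/* was: the current_trace global */
		int			clock_id;	/* per-array trace clock */
		unsigned int		flags;		/* TRACE_ARRAY_FL_GLOBAL marks global_trace */
		struct dentry		*dir;		/* was: d_tracer */
		struct dentry		*options;	/* was: t_options */
		struct dentry		*percpu_dir;	/* was: d_percpu */
	};

	/* debugfs files now carry a (trace_array, cpu) pair in i_private */
	struct trace_cpu {
		struct trace_array	*tr;
		int			cpu;
	};

With this in place, a debugfs handler recovers its instance with "struct trace_cpu *tc = inode->i_private;" and reaches the right buffer through tc->tr, instead of reading and writing global_trace directly.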
Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/trace/trace.c              | 561
 -rw-r--r--  kernel/trace/trace.h              |  21
 -rw-r--r--  kernel/trace/trace_irqsoff.c      |   8
 -rw-r--r--  kernel/trace/trace_sched_wakeup.c |   8
 4 files changed, 358 insertions(+), 240 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 59953aa28845..91fe40905828 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1,7 +1,7 @@
 /*
  * ring buffer based function tracer
  *
- * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
  *
  * Originally taken from the RT patch by:
@@ -251,9 +251,6 @@ static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
 /* trace_types holds a link list of available tracers. */
 static struct tracer *trace_types __read_mostly;
 
-/* current_trace points to the tracer that is currently active */
-static struct tracer *current_trace __read_mostly = &nop_trace;
-
 /*
  * trace_types_lock is used to protect the trace_types list.
  */
@@ -350,9 +347,6 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
 	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS;
 
-static int trace_stop_count;
-static DEFINE_RAW_SPINLOCK(tracing_start_lock);
-
 /**
  * trace_wake_up - wake up tasks waiting for trace input
  *
@@ -708,14 +702,14 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
 	struct ring_buffer *buf;
 
-	if (trace_stop_count)
+	if (tr->stop_count)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
 
-	if (!current_trace->allocated_snapshot) {
+	if (!tr->current_trace->allocated_snapshot) {
 		/* Only the nop tracer should hit this when disabling */
-		WARN_ON_ONCE(current_trace != &nop_trace);
+		WARN_ON_ONCE(tr->current_trace != &nop_trace);
 		return;
 	}
 
@@ -742,11 +736,11 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
 	int ret;
 
-	if (trace_stop_count)
+	if (tr->stop_count)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	if (WARN_ON_ONCE(!current_trace->allocated_snapshot))
+	if (WARN_ON_ONCE(!tr->current_trace->allocated_snapshot))
 		return;
 
 	arch_spin_lock(&ftrace_max_lock);
@@ -853,8 +847,8 @@ int register_tracer(struct tracer *type)
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
 	if (type->selftest && !tracing_selftest_disabled) {
-		struct tracer *saved_tracer = current_trace;
 		struct trace_array *tr = &global_trace;
+		struct tracer *saved_tracer = tr->current_trace;
 
 		/*
 		 * Run a selftest on this tracer.
@@ -865,7 +859,7 @@ int register_tracer(struct tracer *type)
 		 */
 		tracing_reset_online_cpus(tr);
 
-		current_trace = type;
+		tr->current_trace = type;
 
 		if (type->use_max_tr) {
 			/* If we expanded the buffers, make sure the max is expanded too */
@@ -879,7 +873,7 @@ int register_tracer(struct tracer *type)
 		pr_info("Testing tracer %s: ", type->name);
 		ret = type->selftest(type, tr);
 		/* the test is responsible for resetting too */
-		current_trace = saved_tracer;
+		tr->current_trace = saved_tracer;
 		if (ret) {
 			printk(KERN_CONT "FAILED!\n");
 			/* Add the warning after printing 'FAILED' */
@@ -997,7 +991,7 @@ static void trace_init_cmdlines(void)
 
 int is_tracing_stopped(void)
 {
-	return trace_stop_count;
+	return global_trace.stop_count;
 }
 
 /**
@@ -1029,12 +1023,12 @@ void tracing_start(void)
 	if (tracing_disabled)
 		return;
 
-	raw_spin_lock_irqsave(&tracing_start_lock, flags);
-	if (--trace_stop_count) {
-		if (trace_stop_count < 0) {
+	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
+	if (--global_trace.stop_count) {
+		if (global_trace.stop_count < 0) {
 			/* Someone screwed up their debugging */
 			WARN_ON_ONCE(1);
-			trace_stop_count = 0;
+			global_trace.stop_count = 0;
 		}
 		goto out;
 	}
@@ -1054,7 +1048,38 @@ void tracing_start(void)
 
 	ftrace_start();
  out:
-	raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
+	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
+}
+
+static void tracing_start_tr(struct trace_array *tr)
+{
+	struct ring_buffer *buffer;
+	unsigned long flags;
+
+	if (tracing_disabled)
+		return;
+
+	/* If global, we need to also start the max tracer */
+	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
+		return tracing_start();
+
+	raw_spin_lock_irqsave(&tr->start_lock, flags);
+
+	if (--tr->stop_count) {
+		if (tr->stop_count < 0) {
+			/* Someone screwed up their debugging */
+			WARN_ON_ONCE(1);
+			tr->stop_count = 0;
+		}
+		goto out;
+	}
+
+	buffer = tr->buffer;
+	if (buffer)
+		ring_buffer_record_enable(buffer);
+
+ out:
+	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
 }
 
 /**
@@ -1069,8 +1094,8 @@ void tracing_stop(void)
 	unsigned long flags;
 
 	ftrace_stop();
-	raw_spin_lock_irqsave(&tracing_start_lock, flags);
-	if (trace_stop_count++)
+	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
+	if (global_trace.stop_count++)
 		goto out;
 
 	/* Prevent the buffers from switching */
@@ -1087,7 +1112,28 @@ void tracing_stop(void)
 	arch_spin_unlock(&ftrace_max_lock);
 
  out:
-	raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
+	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
+}
+
+static void tracing_stop_tr(struct trace_array *tr)
+{
+	struct ring_buffer *buffer;
+	unsigned long flags;
+
+	/* If global, we need to also stop the max tracer */
+	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
+		return tracing_stop();
+
+	raw_spin_lock_irqsave(&tr->start_lock, flags);
+	if (tr->stop_count++)
+		goto out;
+
+	buffer = tr->buffer;
+	if (buffer)
+		ring_buffer_record_disable(buffer);
+
+ out:
+	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
 }
 
 void trace_stop_cmdline_recording(void);
@@ -1956,6 +2002,7 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 static void *s_start(struct seq_file *m, loff_t *pos)
 {
 	struct trace_iterator *iter = m->private;
+	struct trace_array *tr = iter->tr;
 	int cpu_file = iter->cpu_file;
 	void *p = NULL;
 	loff_t l = 0;
@@ -1968,8 +2015,8 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 	 * will point to the same string as current_trace->name.
 	 */
 	mutex_lock(&trace_types_lock);
-	if (unlikely(current_trace && iter->trace->name != current_trace->name))
-		*iter->trace = *current_trace;
+	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
+		*iter->trace = *tr->current_trace;
 	mutex_unlock(&trace_types_lock);
 
 	if (iter->snapshot && iter->trace->use_max_tr)
@@ -2099,7 +2146,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
 	struct trace_array *tr = iter->tr;
 	struct trace_array_cpu *data = tr->data[tr->cpu];
-	struct tracer *type = current_trace;
+	struct tracer *type = iter->trace;
 	unsigned long entries;
 	unsigned long total;
 	const char *name = "preemption";
@@ -2478,7 +2525,8 @@ static const struct seq_operations tracer_seq_ops = {
 static struct trace_iterator *
 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
 {
-	long cpu_file = (long) inode->i_private;
+	struct trace_cpu *tc = inode->i_private;
+	struct trace_array *tr = tc->tr;
 	struct trace_iterator *iter;
 	int cpu;
 
@@ -2503,19 +2551,20 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
 	if (!iter->trace)
 		goto fail;
 
-	*iter->trace = *current_trace;
+	*iter->trace = *tr->current_trace;
 
 	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
 		goto fail;
 
-	if (current_trace->print_max || snapshot)
+	/* Currently only the top directory has a snapshot */
+	if (tr->current_trace->print_max || snapshot)
 		iter->tr = &max_tr;
 	else
-		iter->tr = &global_trace;
+		iter->tr = tr;
 	iter->snapshot = snapshot;
 	iter->pos = -1;
 	mutex_init(&iter->mutex);
-	iter->cpu_file = cpu_file;
+	iter->cpu_file = tc->cpu;
 
 	/* Notify the tracer early; before we stop tracing. */
 	if (iter->trace && iter->trace->open)
@@ -2531,7 +2580,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
 
 	/* stop the trace while dumping if we are not opening "snapshot" */
 	if (!iter->snapshot)
-		tracing_stop();
+		tracing_stop_tr(tr);
 
 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
 		for_each_tracing_cpu(cpu) {
@@ -2578,6 +2627,7 @@ static int tracing_release(struct inode *inode, struct file *file)
 {
 	struct seq_file *m = file->private_data;
 	struct trace_iterator *iter;
+	struct trace_array *tr;
 	int cpu;
 
 	if (!(file->f_mode & FMODE_READ))
@@ -2585,6 +2635,12 @@ static int tracing_release(struct inode *inode, struct file *file)
 
 	iter = m->private;
 
+	/* Only the global tracer has a matching max_tr */
+	if (iter->tr == &max_tr)
+		tr = &global_trace;
+	else
+		tr = iter->tr;
+
 	mutex_lock(&trace_types_lock);
 	for_each_tracing_cpu(cpu) {
 		if (iter->buffer_iter[cpu])
@@ -2596,7 +2652,7 @@ static int tracing_release(struct inode *inode, struct file *file)
 
 	if (!iter->snapshot)
 		/* reenable tracing if it was previously enabled */
-		tracing_start();
+		tracing_start_tr(tr);
 	mutex_unlock(&trace_types_lock);
 
 	mutex_destroy(&iter->mutex);
@@ -2615,12 +2671,13 @@ static int tracing_open(struct inode *inode, struct file *file)
 	/* If this file was open for write, then erase contents */
 	if ((file->f_mode & FMODE_WRITE) &&
 	    (file->f_flags & O_TRUNC)) {
-		long cpu = (long) inode->i_private;
+		struct trace_cpu *tc = inode->i_private;
+		struct trace_array *tr = tc->tr;
 
-		if (cpu == RING_BUFFER_ALL_CPUS)
-			tracing_reset_online_cpus(&global_trace);
+		if (tc->cpu == RING_BUFFER_ALL_CPUS)
+			tracing_reset_online_cpus(tr);
 		else
-			tracing_reset(&global_trace, cpu);
+			tracing_reset(tr, tc->cpu);
 	}
 
 	if (file->f_mode & FMODE_READ) {
@@ -2767,8 +2824,9 @@ static ssize_t
 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 		      size_t count, loff_t *ppos)
 {
-	int err, cpu;
+	struct trace_array *tr = filp->private_data;
 	cpumask_var_t tracing_cpumask_new;
+	int err, cpu;
 
 	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
 		return -ENOMEM;
@@ -2788,13 +2846,13 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 		 */
 		if (cpumask_test_cpu(cpu, tracing_cpumask) &&
 				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
-			atomic_inc(&global_trace.data[cpu]->disabled);
-			ring_buffer_record_disable_cpu(global_trace.buffer, cpu);
+			atomic_inc(&tr->data[cpu]->disabled);
+			ring_buffer_record_disable_cpu(tr->buffer, cpu);
 		}
 		if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
 				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
-			atomic_dec(&global_trace.data[cpu]->disabled);
-			ring_buffer_record_enable_cpu(global_trace.buffer, cpu);
+			atomic_dec(&tr->data[cpu]->disabled);
+			ring_buffer_record_enable_cpu(tr->buffer, cpu);
 		}
 	}
 	arch_spin_unlock(&ftrace_max_lock);
@@ -2823,12 +2881,13 @@ static const struct file_operations tracing_cpumask_fops = {
 static int tracing_trace_options_show(struct seq_file *m, void *v)
 {
 	struct tracer_opt *trace_opts;
+	struct trace_array *tr = m->private;
 	u32 tracer_flags;
 	int i;
 
 	mutex_lock(&trace_types_lock);
-	tracer_flags = current_trace->flags->val;
-	trace_opts = current_trace->flags->opts;
+	tracer_flags = tr->current_trace->flags->val;
+	trace_opts = tr->current_trace->flags->opts;
 
 	for (i = 0; trace_options[i]; i++) {
 		if (trace_flags & (1 << i))
@@ -2892,15 +2951,15 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
 	return 0;
 }
 
-int set_tracer_flag(unsigned int mask, int enabled)
+int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
 {
 	/* do nothing if flag is already set */
 	if (!!(trace_flags & mask) == !!enabled)
 		return 0;
 
 	/* Give the tracer a chance to approve the change */
-	if (current_trace->flag_changed)
-		if (current_trace->flag_changed(current_trace, mask, !!enabled))
+	if (tr->current_trace->flag_changed)
+		if (tr->current_trace->flag_changed(tr->current_trace, mask, !!enabled))
 			return -EINVAL;
 
 	if (enabled)
@@ -2924,7 +2983,7 @@ int set_tracer_flag(unsigned int mask, int enabled)
 	return 0;
 }
 
-static int trace_set_options(char *option)
+static int trace_set_options(struct trace_array *tr, char *option)
 {
 	char *cmp;
 	int neg = 0;
@@ -2942,14 +3001,14 @@ static int trace_set_options(char *option)
 
 	for (i = 0; trace_options[i]; i++) {
 		if (strcmp(cmp, trace_options[i]) == 0) {
-			ret = set_tracer_flag(1 << i, !neg);
+			ret = set_tracer_flag(tr, 1 << i, !neg);
 			break;
 		}
 	}
 
 	/* If no option could be set, test the specific tracer options */
 	if (!trace_options[i])
-		ret = set_tracer_option(current_trace, cmp, neg);
+		ret = set_tracer_option(tr->current_trace, cmp, neg);
 
 	mutex_unlock(&trace_types_lock);
 
@@ -2960,6 +3019,8 @@ static ssize_t
 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 			size_t cnt, loff_t *ppos)
 {
+	struct seq_file *m = filp->private_data;
+	struct trace_array *tr = m->private;
 	char buf[64];
 	int ret;
 
@@ -2971,7 +3032,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 
 	buf[cnt] = 0;
 
-	ret = trace_set_options(buf);
+	ret = trace_set_options(tr, buf);
 	if (ret < 0)
 		return ret;
 
@@ -2984,7 +3045,8 @@ static int tracing_trace_options_open(struct inode *inode, struct file *file)
 {
 	if (tracing_disabled)
 		return -ENODEV;
-	return single_open(file, tracing_trace_options_show, NULL);
+
+	return single_open(file, tracing_trace_options_show, inode->i_private);
 }
 
 static const struct file_operations tracing_iter_fops = {
@@ -3082,11 +3144,12 @@ static ssize_t
 tracing_set_trace_read(struct file *filp, char __user *ubuf,
 		       size_t cnt, loff_t *ppos)
 {
+	struct trace_array *tr = filp->private_data;
 	char buf[MAX_TRACER_SIZE+2];
 	int r;
 
 	mutex_lock(&trace_types_lock);
-	r = sprintf(buf, "%s\n", current_trace->name);
+	r = sprintf(buf, "%s\n", tr->current_trace->name);
 	mutex_unlock(&trace_types_lock);
 
 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
@@ -3130,7 +3193,8 @@ static int resize_buffer_duplicate_size(struct trace_array *tr,
 	return ret;
 }
 
-static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
+static int __tracing_resize_ring_buffer(struct trace_array *tr,
+					unsigned long size, int cpu)
 {
 	int ret;
 
@@ -3142,20 +3206,20 @@ static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
 	ring_buffer_expanded = 1;
 
 	/* May be called before buffers are initialized */
-	if (!global_trace.buffer)
+	if (!tr->buffer)
 		return 0;
 
-	ret = ring_buffer_resize(global_trace.buffer, size, cpu);
+	ret = ring_buffer_resize(tr->buffer, size, cpu);
 	if (ret < 0)
 		return ret;
 
-	if (!current_trace->use_max_tr)
+	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
+	    !tr->current_trace->use_max_tr)
 		goto out;
 
 	ret = ring_buffer_resize(max_tr.buffer, size, cpu);
 	if (ret < 0) {
-		int r = resize_buffer_duplicate_size(&global_trace,
-						     &global_trace, cpu);
+		int r = resize_buffer_duplicate_size(tr, tr, cpu);
 		if (r < 0) {
 			/*
 			 * AARGH! We are left with different
@@ -3184,14 +3248,15 @@ static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
 
  out:
 	if (cpu == RING_BUFFER_ALL_CPUS)
-		set_buffer_entries(&global_trace, size);
+		set_buffer_entries(tr, size);
 	else
-		global_trace.data[cpu]->entries = size;
+		tr->data[cpu]->entries = size;
 
 	return ret;
 }
 
-static ssize_t tracing_resize_ring_buffer(unsigned long size, int cpu_id)
+static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
+					  unsigned long size, int cpu_id)
 {
 	int ret = size;
 
@@ -3205,7 +3270,7 @@ static ssize_t tracing_resize_ring_buffer(unsigned long size, int cpu_id)
 		}
 	}
 
-	ret = __tracing_resize_ring_buffer(size, cpu_id);
+	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
 	if (ret < 0)
 		ret = -ENOMEM;
 
@@ -3232,7 +3297,7 @@ int tracing_update_buffers(void)
 
 	mutex_lock(&trace_types_lock);
 	if (!ring_buffer_expanded)
-		ret = __tracing_resize_ring_buffer(trace_buf_size,
+		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
 						RING_BUFFER_ALL_CPUS);
 	mutex_unlock(&trace_types_lock);
 
@@ -3242,7 +3307,7 @@ int tracing_update_buffers(void)
 struct trace_option_dentry;
 
 static struct trace_option_dentry *
-create_trace_option_files(struct tracer *tracer);
+create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
 
 static void
 destroy_trace_option_files(struct trace_option_dentry *topts);
@@ -3258,7 +3323,7 @@ static int tracing_set_tracer(const char *buf)
 	mutex_lock(&trace_types_lock);
 
 	if (!ring_buffer_expanded) {
-		ret = __tracing_resize_ring_buffer(trace_buf_size,
+		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
 						RING_BUFFER_ALL_CPUS);
 		if (ret < 0)
 			goto out;
@@ -3273,18 +3338,18 @@ static int tracing_set_tracer(const char *buf)
 		ret = -EINVAL;
 		goto out;
 	}
-	if (t == current_trace)
+	if (t == tr->current_trace)
 		goto out;
 
 	trace_branch_disable();
 
-	current_trace->enabled = false;
+	tr->current_trace->enabled = false;
 
-	if (current_trace->reset)
-		current_trace->reset(tr);
+	if (tr->current_trace->reset)
+		tr->current_trace->reset(tr);
 
-	had_max_tr = current_trace->allocated_snapshot;
-	current_trace = &nop_trace;
+	had_max_tr = tr->current_trace->allocated_snapshot;
+	tr->current_trace = &nop_trace;
 
 	if (had_max_tr && !t->use_max_tr) {
 		/*
@@ -3303,11 +3368,11 @@ static int tracing_set_tracer(const char *buf)
 		ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
 		set_buffer_entries(&max_tr, 1);
 		tracing_reset_online_cpus(&max_tr);
-		current_trace->allocated_snapshot = false;
+		tr->current_trace->allocated_snapshot = false;
 	}
 	destroy_trace_option_files(topts);
 
-	topts = create_trace_option_files(t);
+	topts = create_trace_option_files(tr, t);
 	if (t->use_max_tr && !had_max_tr) {
 		/* we need to make per cpu buffer sizes equivalent */
 		ret = resize_buffer_duplicate_size(&max_tr, &global_trace,
@@ -3323,8 +3388,8 @@ static int tracing_set_tracer(const char *buf)
 			goto out;
 	}
 
-	current_trace = t;
-	current_trace->enabled = true;
+	tr->current_trace = t;
+	tr->current_trace->enabled = true;
 	trace_branch_enable(tr);
  out:
 	mutex_unlock(&trace_types_lock);
@@ -3398,7 +3463,8 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf,
 
 static int tracing_open_pipe(struct inode *inode, struct file *filp)
 {
-	long cpu_file = (long) inode->i_private;
+	struct trace_cpu *tc = inode->i_private;
+	struct trace_array *tr = tc->tr;
 	struct trace_iterator *iter;
 	int ret = 0;
 
@@ -3423,7 +3489,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 		ret = -ENOMEM;
 		goto fail;
 	}
-	*iter->trace = *current_trace;
+	*iter->trace = *tr->current_trace;
 
 	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
 		ret = -ENOMEM;
@@ -3440,8 +3506,8 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 	if (trace_clocks[trace_clock_id].in_ns)
 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 
-	iter->cpu_file = cpu_file;
-	iter->tr = &global_trace;
+	iter->cpu_file = tc->cpu;
+	iter->tr = tc->tr;
 	mutex_init(&iter->mutex);
 	filp->private_data = iter;
 
@@ -3563,6 +3629,7 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 		  size_t cnt, loff_t *ppos)
 {
 	struct trace_iterator *iter = filp->private_data;
+	struct trace_array *tr = iter->tr;
 	ssize_t sret;
 
 	/* return any leftover data */
@@ -3574,8 +3641,8 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 
 	/* copy the tracer to avoid using a global lock all around */
 	mutex_lock(&trace_types_lock);
-	if (unlikely(iter->trace->name != current_trace->name))
-		*iter->trace = *current_trace;
+	if (unlikely(iter->trace->name != tr->current_trace->name))
+		*iter->trace = *tr->current_trace;
 	mutex_unlock(&trace_types_lock);
 
 	/*
@@ -3731,6 +3798,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 		.ops = &tracing_pipe_buf_ops,
 		.spd_release = tracing_spd_release_pipe,
 	};
+	struct trace_array *tr = iter->tr;
 	ssize_t ret;
 	size_t rem;
 	unsigned int i;
@@ -3740,8 +3808,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 
 	/* copy the tracer to avoid using a global lock all around */
 	mutex_lock(&trace_types_lock);
-	if (unlikely(iter->trace->name != current_trace->name))
-		*iter->trace = *current_trace;
+	if (unlikely(iter->trace->name != tr->current_trace->name))
+		*iter->trace = *tr->current_trace;
 	mutex_unlock(&trace_types_lock);
 
 	mutex_lock(&iter->mutex);
@@ -3803,43 +3871,19 @@ out_err:
 	goto out;
 }
 
-struct ftrace_entries_info {
-	struct trace_array	*tr;
-	int			cpu;
-};
-
-static int tracing_entries_open(struct inode *inode, struct file *filp)
-{
-	struct ftrace_entries_info *info;
-
-	if (tracing_disabled)
-		return -ENODEV;
-
-	info = kzalloc(sizeof(*info), GFP_KERNEL);
-	if (!info)
-		return -ENOMEM;
-
-	info->tr = &global_trace;
-	info->cpu = (unsigned long)inode->i_private;
-
-	filp->private_data = info;
-
-	return 0;
-}
-
 static ssize_t
 tracing_entries_read(struct file *filp, char __user *ubuf,
 		     size_t cnt, loff_t *ppos)
 {
-	struct ftrace_entries_info *info = filp->private_data;
-	struct trace_array *tr = info->tr;
+	struct trace_cpu *tc = filp->private_data;
+	struct trace_array *tr = tc->tr;
 	char buf[64];
 	int r = 0;
 	ssize_t ret;
 
 	mutex_lock(&trace_types_lock);
 
-	if (info->cpu == RING_BUFFER_ALL_CPUS) {
+	if (tc->cpu == RING_BUFFER_ALL_CPUS) {
 		int cpu, buf_size_same;
 		unsigned long size;
 
@@ -3866,7 +3910,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
 		} else
 			r = sprintf(buf, "X\n");
 	} else
-		r = sprintf(buf, "%lu\n", tr->data[info->cpu]->entries >> 10);
+		r = sprintf(buf, "%lu\n", tr->data[tc->cpu]->entries >> 10);
 
 	mutex_unlock(&trace_types_lock);
 
@@ -3878,7 +3922,7 @@ static ssize_t
 tracing_entries_write(struct file *filp, const char __user *ubuf,
 		      size_t cnt, loff_t *ppos)
 {
-	struct ftrace_entries_info *info = filp->private_data;
+	struct trace_cpu *tc = filp->private_data;
 	unsigned long val;
 	int ret;
 
@@ -3893,7 +3937,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	/* value is in KB */
 	val <<= 10;
 
-	ret = tracing_resize_ring_buffer(val, info->cpu);
+	ret = tracing_resize_ring_buffer(tc->tr, val, tc->cpu);
 	if (ret < 0)
 		return ret;
 
@@ -3902,16 +3946,6 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
-static int
-tracing_entries_release(struct inode *inode, struct file *filp)
-{
-	struct ftrace_entries_info *info = filp->private_data;
-
-	kfree(info);
-
-	return 0;
-}
-
 static ssize_t
 tracing_total_entries_read(struct file *filp, char __user *ubuf,
 			   size_t cnt, loff_t *ppos)
@@ -3953,11 +3987,13 @@ tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
 static int
 tracing_free_buffer_release(struct inode *inode, struct file *filp)
 {
+	struct trace_array *tr = inode->i_private;
+
 	/* disable tracing ? */
 	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
 		tracing_off();
 	/* resize the ring buffer to 0 */
-	tracing_resize_ring_buffer(0, RING_BUFFER_ALL_CPUS);
+	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
 
 	return 0;
 }
@@ -4068,13 +4104,14 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 
 static int tracing_clock_show(struct seq_file *m, void *v)
 {
+	struct trace_array *tr = m->private;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
 		seq_printf(m,
 			"%s%s%s%s", i ? " " : "",
-			i == trace_clock_id ? "[" : "", trace_clocks[i].name,
-			i == trace_clock_id ? "]" : "");
+			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
+			i == tr->clock_id ? "]" : "");
 	seq_putc(m, '\n');
 
 	return 0;
@@ -4083,6 +4120,8 @@ static int tracing_clock_show(struct seq_file *m, void *v)
 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
 				   size_t cnt, loff_t *fpos)
 {
+	struct seq_file *m = filp->private_data;
+	struct trace_array *tr = m->private;
 	char buf[64];
 	const char *clockstr;
 	int i;
@@ -4104,12 +4143,12 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
 	if (i == ARRAY_SIZE(trace_clocks))
 		return -EINVAL;
 
-	trace_clock_id = i;
-
 	mutex_lock(&trace_types_lock);
 
-	ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func);
-	if (max_tr.buffer)
+	tr->clock_id = i;
+
+	ring_buffer_set_clock(tr->buffer, trace_clocks[i].func);
+	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && max_tr.buffer)
 		ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);
 
 	/*
@@ -4130,20 +4169,37 @@ static int tracing_clock_open(struct inode *inode, struct file *file)
 {
 	if (tracing_disabled)
 		return -ENODEV;
-	return single_open(file, tracing_clock_show, NULL);
+
+	return single_open(file, tracing_clock_show, inode->i_private);
 }
 
 #ifdef CONFIG_TRACER_SNAPSHOT
 static int tracing_snapshot_open(struct inode *inode, struct file *file)
 {
+	struct trace_cpu *tc = inode->i_private;
 	struct trace_iterator *iter;
+	struct seq_file *m;
 	int ret = 0;
 
 	if (file->f_mode & FMODE_READ) {
 		iter = __tracing_open(inode, file, true);
 		if (IS_ERR(iter))
 			ret = PTR_ERR(iter);
+	} else {
+		/* Writes still need the seq_file to hold the private data */
+		m = kzalloc(sizeof(*m), GFP_KERNEL);
+		if (!m)
+			return -ENOMEM;
+		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+		if (!iter) {
+			kfree(m);
+			return -ENOMEM;
+		}
+		iter->tr = tc->tr;
+		m->private = iter;
+		file->private_data = m;
 	}
+
 	return ret;
 }
 
@@ -4151,6 +4207,9 @@ static ssize_t
 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
 		       loff_t *ppos)
 {
+	struct seq_file *m = filp->private_data;
+	struct trace_iterator *iter = m->private;
+	struct trace_array *tr = iter->tr;
 	unsigned long val;
 	int ret;
 
@@ -4164,30 +4223,30 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
 
 	mutex_lock(&trace_types_lock);
 
-	if (current_trace->use_max_tr) {
+	if (tr->current_trace->use_max_tr) {
 		ret = -EBUSY;
 		goto out;
 	}
 
 	switch (val) {
 	case 0:
-		if (current_trace->allocated_snapshot) {
+		if (tr->current_trace->allocated_snapshot) {
 			/* free spare buffer */
 			ring_buffer_resize(max_tr.buffer, 1,
 					   RING_BUFFER_ALL_CPUS);
 			set_buffer_entries(&max_tr, 1);
 			tracing_reset_online_cpus(&max_tr);
-			current_trace->allocated_snapshot = false;
+			tr->current_trace->allocated_snapshot = false;
 		}
 		break;
 	case 1:
-		if (!current_trace->allocated_snapshot) {
+		if (!tr->current_trace->allocated_snapshot) {
 			/* allocate spare buffer */
 			ret = resize_buffer_duplicate_size(&max_tr,
 					&global_trace, RING_BUFFER_ALL_CPUS);
 			if (ret < 0)
 				break;
-			current_trace->allocated_snapshot = true;
+			tr->current_trace->allocated_snapshot = true;
 		}
 
 		local_irq_disable();
@@ -4196,7 +4255,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
 		local_irq_enable();
 		break;
 	default:
-		if (current_trace->allocated_snapshot)
+		if (tr->current_trace->allocated_snapshot)
 			tracing_reset_online_cpus(&max_tr);
 		break;
 	}
@@ -4209,6 +4268,22 @@ out:
 	mutex_unlock(&trace_types_lock);
 	return ret;
 }
+
+static int tracing_snapshot_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *m = file->private_data;
+
+	if (file->f_mode & FMODE_READ)
+		return tracing_release(inode, file);
+
+	/* If write only, the seq_file is just a stub */
+	if (m)
+		kfree(m->private);
+	kfree(m);
+
+	return 0;
+}
+
 #endif /* CONFIG_TRACER_SNAPSHOT */
 
 
@@ -4236,10 +4311,9 @@ static const struct file_operations tracing_pipe_fops = {
 };
 
 static const struct file_operations tracing_entries_fops = {
-	.open		= tracing_entries_open,
+	.open		= tracing_open_generic,
 	.read		= tracing_entries_read,
 	.write		= tracing_entries_write,
-	.release	= tracing_entries_release,
 	.llseek		= generic_file_llseek,
 };
 
@@ -4274,7 +4348,7 @@ static const struct file_operations snapshot_fops = {
 	.read		= seq_read,
 	.write		= tracing_snapshot_write,
 	.llseek		= tracing_seek,
-	.release	= tracing_release,
+	.release	= tracing_snapshot_release,
 };
 #endif /* CONFIG_TRACER_SNAPSHOT */
 
@@ -4287,7 +4361,8 @@ struct ftrace_buffer_info {
 
 static int tracing_buffers_open(struct inode *inode, struct file *filp)
 {
-	int cpu = (int)(long)inode->i_private;
+	struct trace_cpu *tc = inode->i_private;
+	struct trace_array *tr = tc->tr;
 	struct ftrace_buffer_info *info;
 
 	if (tracing_disabled)
@@ -4297,8 +4372,8 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
 	if (!info)
 		return -ENOMEM;
 
-	info->tr = &global_trace;
-	info->cpu = cpu;
+	info->tr = tr;
+	info->cpu = tc->cpu;
 	info->spare = NULL;
 	/* Force reading ring buffer for first read */
 	info->read = (unsigned int)-1;
@@ -4535,12 +4610,13 @@ static ssize_t
 tracing_stats_read(struct file *filp, char __user *ubuf,
 		   size_t count, loff_t *ppos)
 {
-	unsigned long cpu = (unsigned long)filp->private_data;
-	struct trace_array *tr = &global_trace;
+	struct trace_cpu *tc = filp->private_data;
+	struct trace_array *tr = tc->tr;
 	struct trace_seq *s;
 	unsigned long cnt;
 	unsigned long long t;
 	unsigned long usec_rem;
+	int cpu = tc->cpu;
 
 	s = kmalloc(sizeof(*s), GFP_KERNEL);
 	if (!s)
@@ -4636,58 +4712,57 @@ static const struct file_operations tracing_dyn_info_fops = {
 };
 #endif
 
-static struct dentry *d_tracer;
-
-struct dentry *tracing_init_dentry(void)
+struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
 {
 	static int once;
 
-	if (d_tracer)
-		return d_tracer;
+	if (tr->dir)
+		return tr->dir;
 
 	if (!debugfs_initialized())
 		return NULL;
 
-	d_tracer = debugfs_create_dir("tracing", NULL);
+	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
+		tr->dir = debugfs_create_dir("tracing", NULL);
 
-	if (!d_tracer && !once) {
+	if (!tr->dir && !once) {
 		once = 1;
 		pr_warning("Could not create debugfs directory 'tracing'\n");
 		return NULL;
 	}
 
-	return d_tracer;
+	return tr->dir;
 }
 
-static struct dentry *d_percpu;
+struct dentry *tracing_init_dentry(void)
+{
+	return tracing_init_dentry_tr(&global_trace);
+}
 
-static struct dentry *tracing_dentry_percpu(void)
+static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
 {
-	static int once;
 	struct dentry *d_tracer;
 
-	if (d_percpu)
-		return d_percpu;
-
-	d_tracer = tracing_init_dentry();
+	if (tr->percpu_dir)
+		return tr->percpu_dir;
 
+	d_tracer = tracing_init_dentry_tr(tr);
 	if (!d_tracer)
 		return NULL;
 
-	d_percpu = debugfs_create_dir("per_cpu", d_tracer);
+	tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
 
-	if (!d_percpu && !once) {
-		once = 1;
-		pr_warning("Could not create debugfs directory 'per_cpu'\n");
-		return NULL;
-	}
+	WARN_ONCE(!tr->percpu_dir,
		  "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
 
-	return d_percpu;
+	return tr->percpu_dir;
 }
 
-static void tracing_init_debugfs_percpu(long cpu)
+static void
+tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
 {
-	struct dentry *d_percpu = tracing_dentry_percpu();
+	struct trace_array_cpu *data = tr->data[cpu];
+	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
 	struct dentry *d_cpu;
 	char cpu_dir[30]; /* 30 characters should be more than enough */
 
@@ -4703,20 +4778,20 @@ static void tracing_init_debugfs_percpu(long cpu)
 
 	/* per cpu trace_pipe */
 	trace_create_file("trace_pipe", 0444, d_cpu,
-			(void *) cpu, &tracing_pipe_fops);
+			(void *)&data->trace_cpu, &tracing_pipe_fops);
 
 	/* per cpu trace */
 	trace_create_file("trace", 0644, d_cpu,
-			(void *) cpu, &tracing_fops);
+			(void *)&data->trace_cpu, &tracing_fops);
 
 	trace_create_file("trace_pipe_raw", 0444, d_cpu,
-			(void *) cpu, &tracing_buffers_fops);
+			(void *)&data->trace_cpu, &tracing_buffers_fops);
 
 	trace_create_file("stats", 0444, d_cpu,
-			(void *) cpu, &tracing_stats_fops);
+			(void *)&data->trace_cpu, &tracing_stats_fops);
 
 	trace_create_file("buffer_size_kb", 0444, d_cpu,
-			(void *) cpu, &tracing_entries_fops);
+			(void *)&data->trace_cpu, &tracing_entries_fops);
 }
 
 #ifdef CONFIG_FTRACE_SELFTEST
@@ -4727,6 +4802,7 @@ static void tracing_init_debugfs_percpu(long cpu) | |||
4727 | struct trace_option_dentry { | 4802 | struct trace_option_dentry { |
4728 | struct tracer_opt *opt; | 4803 | struct tracer_opt *opt; |
4729 | struct tracer_flags *flags; | 4804 | struct tracer_flags *flags; |
4805 | struct trace_array *tr; | ||
4730 | struct dentry *entry; | 4806 | struct dentry *entry; |
4731 | }; | 4807 | }; |
4732 | 4808 | ||
@@ -4762,7 +4838,7 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
4762 | 4838 | ||
4763 | if (!!(topt->flags->val & topt->opt->bit) != val) { | 4839 | if (!!(topt->flags->val & topt->opt->bit) != val) { |
4764 | mutex_lock(&trace_types_lock); | 4840 | mutex_lock(&trace_types_lock); |
4765 | ret = __set_tracer_option(current_trace, topt->flags, | 4841 | ret = __set_tracer_option(topt->tr->current_trace, topt->flags, |
4766 | topt->opt, !val); | 4842 | topt->opt, !val); |
4767 | mutex_unlock(&trace_types_lock); | 4843 | mutex_unlock(&trace_types_lock); |
4768 | if (ret) | 4844 | if (ret) |
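With the tr back-pointer stored in trace_option_dentry at creation time, the write handler resolves the active tracer through the instance that owns the option file rather than through the old file-static current_trace. A compilable model of that flow (structures and names are illustrative):

#include <stdio.h>

struct tracer_m  { const char *name; };
struct instance  { struct tracer_m *current_trace; };

struct option_file {                  /* model of trace_option_dentry */
	struct instance *tr;          /* back-pointer set at creation */
	const char *opt_name;
};

/* model of trace_options_write(): private data identifies the instance */
static void option_write(void *private_data, int val)
{
	struct option_file *topt = private_data;
	printf("set %s=%d on tracer %s\n", topt->opt_name, val,
	       topt->tr->current_trace->name);
}

int main(void)
{
	struct tracer_m nop = { "nop" };
	struct instance tr = { &nop };
	struct option_file f = { &tr, "overwrite" };  /* create_trace_option_file() */
	option_write(&f, 1);
	return 0;
}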
@@ -4801,6 +4877,7 @@ static ssize_t | |||
4801 | trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, | 4877 | trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, |
4802 | loff_t *ppos) | 4878 | loff_t *ppos) |
4803 | { | 4879 | { |
4880 | struct trace_array *tr = &global_trace; | ||
4804 | long index = (long)filp->private_data; | 4881 | long index = (long)filp->private_data; |
4805 | unsigned long val; | 4882 | unsigned long val; |
4806 | int ret; | 4883 | int ret; |
@@ -4813,7 +4890,7 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
4813 | return -EINVAL; | 4890 | return -EINVAL; |
4814 | 4891 | ||
4815 | mutex_lock(&trace_types_lock); | 4892 | mutex_lock(&trace_types_lock); |
4816 | ret = set_tracer_flag(1 << index, val); | 4893 | ret = set_tracer_flag(tr, 1 << index, val); |
4817 | mutex_unlock(&trace_types_lock); | 4894 | mutex_unlock(&trace_types_lock); |
4818 | 4895 | ||
4819 | if (ret < 0) | 4896 | if (ret < 0) |
@@ -4847,40 +4924,41 @@ struct dentry *trace_create_file(const char *name, | |||
4847 | } | 4924 | } |
4848 | 4925 | ||
4849 | 4926 | ||
4850 | static struct dentry *trace_options_init_dentry(void) | 4927 | static struct dentry *trace_options_init_dentry(struct trace_array *tr) |
4851 | { | 4928 | { |
4852 | struct dentry *d_tracer; | 4929 | struct dentry *d_tracer; |
4853 | static struct dentry *t_options; | ||
4854 | 4930 | ||
4855 | if (t_options) | 4931 | if (tr->options) |
4856 | return t_options; | 4932 | return tr->options; |
4857 | 4933 | ||
4858 | d_tracer = tracing_init_dentry(); | 4934 | d_tracer = tracing_init_dentry_tr(tr); |
4859 | if (!d_tracer) | 4935 | if (!d_tracer) |
4860 | return NULL; | 4936 | return NULL; |
4861 | 4937 | ||
4862 | t_options = debugfs_create_dir("options", d_tracer); | 4938 | tr->options = debugfs_create_dir("options", d_tracer); |
4863 | if (!t_options) { | 4939 | if (!tr->options) { |
4864 | pr_warning("Could not create debugfs directory 'options'\n"); | 4940 | pr_warning("Could not create debugfs directory 'options'\n"); |
4865 | return NULL; | 4941 | return NULL; |
4866 | } | 4942 | } |
4867 | 4943 | ||
4868 | return t_options; | 4944 | return tr->options; |
4869 | } | 4945 | } |
4870 | 4946 | ||
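trace_options_init_dentry() shows the same migration in miniature: the dentry cache moves from a function-static (one cache shared by every caller, forever) to tr->options (one cache per instance). The failure mode the change avoids, in a compilable model:

#include <stdio.h>

struct instance { int options_dir; };   /* 0 means "not created yet" */

static int next_dir = 100;              /* fake directory handles */

/* Old shape: one function-static cache, shared by every instance. */
static int options_dir_static(void)
{
	static int t_options;
	if (!t_options)
		t_options = next_dir++;
	return t_options;
}

/* New shape: the cache lives in the instance itself. */
static int options_dir_per_instance(struct instance *tr)
{
	if (!tr->options_dir)
		tr->options_dir = next_dir++;
	return tr->options_dir;
}

int main(void)
{
	struct instance a = { 0 }, b = { 0 };
	/* static cache: every caller gets handle 100, i.e. the same directory */
	printf("static: %d %d\n", options_dir_static(), options_dir_static());
	/* per-instance cache: a gets 101, b gets its own 102 */
	printf("member: %d %d\n", options_dir_per_instance(&a),
	       options_dir_per_instance(&b));
	return 0;
}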
4871 | static void | 4947 | static void |
4872 | create_trace_option_file(struct trace_option_dentry *topt, | 4948 | create_trace_option_file(struct trace_array *tr, |
4949 | struct trace_option_dentry *topt, | ||
4873 | struct tracer_flags *flags, | 4950 | struct tracer_flags *flags, |
4874 | struct tracer_opt *opt) | 4951 | struct tracer_opt *opt) |
4875 | { | 4952 | { |
4876 | struct dentry *t_options; | 4953 | struct dentry *t_options; |
4877 | 4954 | ||
4878 | t_options = trace_options_init_dentry(); | 4955 | t_options = trace_options_init_dentry(tr); |
4879 | if (!t_options) | 4956 | if (!t_options) |
4880 | return; | 4957 | return; |
4881 | 4958 | ||
4882 | topt->flags = flags; | 4959 | topt->flags = flags; |
4883 | topt->opt = opt; | 4960 | topt->opt = opt; |
4961 | topt->tr = tr; | ||
4884 | 4962 | ||
4885 | topt->entry = trace_create_file(opt->name, 0644, t_options, topt, | 4963 | topt->entry = trace_create_file(opt->name, 0644, t_options, topt, |
4886 | &trace_options_fops); | 4964 | &trace_options_fops); |
@@ -4888,7 +4966,7 @@ create_trace_option_file(struct trace_option_dentry *topt, | |||
4888 | } | 4966 | } |
4889 | 4967 | ||
4890 | static struct trace_option_dentry * | 4968 | static struct trace_option_dentry * |
4891 | create_trace_option_files(struct tracer *tracer) | 4969 | create_trace_option_files(struct trace_array *tr, struct tracer *tracer) |
4892 | { | 4970 | { |
4893 | struct trace_option_dentry *topts; | 4971 | struct trace_option_dentry *topts; |
4894 | struct tracer_flags *flags; | 4972 | struct tracer_flags *flags; |
@@ -4913,7 +4991,7 @@ create_trace_option_files(struct tracer *tracer) | |||
4913 | return NULL; | 4991 | return NULL; |
4914 | 4992 | ||
4915 | for (cnt = 0; opts[cnt].name; cnt++) | 4993 | for (cnt = 0; opts[cnt].name; cnt++) |
4916 | create_trace_option_file(&topts[cnt], flags, | 4994 | create_trace_option_file(tr, &topts[cnt], flags, |
4917 | &opts[cnt]); | 4995 | &opts[cnt]); |
4918 | 4996 | ||
4919 | return topts; | 4997 | return topts; |
@@ -4936,11 +5014,12 @@ destroy_trace_option_files(struct trace_option_dentry *topts) | |||
4936 | } | 5014 | } |
4937 | 5015 | ||
4938 | static struct dentry * | 5016 | static struct dentry * |
4939 | create_trace_option_core_file(const char *option, long index) | 5017 | create_trace_option_core_file(struct trace_array *tr, |
5018 | const char *option, long index) | ||
4940 | { | 5019 | { |
4941 | struct dentry *t_options; | 5020 | struct dentry *t_options; |
4942 | 5021 | ||
4943 | t_options = trace_options_init_dentry(); | 5022 | t_options = trace_options_init_dentry(tr); |
4944 | if (!t_options) | 5023 | if (!t_options) |
4945 | return NULL; | 5024 | return NULL; |
4946 | 5025 | ||
@@ -4948,17 +5027,17 @@ create_trace_option_core_file(const char *option, long index) | |||
4948 | &trace_options_core_fops); | 5027 | &trace_options_core_fops); |
4949 | } | 5028 | } |
4950 | 5029 | ||
4951 | static __init void create_trace_options_dir(void) | 5030 | static __init void create_trace_options_dir(struct trace_array *tr) |
4952 | { | 5031 | { |
4953 | struct dentry *t_options; | 5032 | struct dentry *t_options; |
4954 | int i; | 5033 | int i; |
4955 | 5034 | ||
4956 | t_options = trace_options_init_dentry(); | 5035 | t_options = trace_options_init_dentry(tr); |
4957 | if (!t_options) | 5036 | if (!t_options) |
4958 | return; | 5037 | return; |
4959 | 5038 | ||
4960 | for (i = 0; trace_options[i]; i++) | 5039 | for (i = 0; trace_options[i]; i++) |
4961 | create_trace_option_core_file(trace_options[i], i); | 5040 | create_trace_option_core_file(tr, trace_options[i], i); |
4962 | } | 5041 | } |
4963 | 5042 | ||
4964 | static ssize_t | 5043 | static ssize_t |
@@ -4997,12 +5076,12 @@ rb_simple_write(struct file *filp, const char __user *ubuf, | |||
4997 | mutex_lock(&trace_types_lock); | 5076 | mutex_lock(&trace_types_lock); |
4998 | if (val) { | 5077 | if (val) { |
4999 | ring_buffer_record_on(buffer); | 5078 | ring_buffer_record_on(buffer); |
5000 | if (current_trace->start) | 5079 | if (tr->current_trace->start) |
5001 | current_trace->start(tr); | 5080 | tr->current_trace->start(tr); |
5002 | } else { | 5081 | } else { |
5003 | ring_buffer_record_off(buffer); | 5082 | ring_buffer_record_off(buffer); |
5004 | if (current_trace->stop) | 5083 | if (tr->current_trace->stop) |
5005 | current_trace->stop(tr); | 5084 | tr->current_trace->stop(tr); |
5006 | } | 5085 | } |
5007 | mutex_unlock(&trace_types_lock); | 5086 | mutex_unlock(&trace_types_lock); |
5008 | } | 5087 | } |
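rb_simple_write() (the tracing_on file) now flips the buffer of the instance behind the file and calls that instance's optional tracer callbacks. A compilable model of the optional-callback dispatch:

#include <stdio.h>

struct tracer_m {
	const char *name;
	void (*start)(void);          /* either hook may be NULL */
	void (*stop)(void);
};

struct instance {
	int recording;
	struct tracer_m *current_trace;
};

static void set_recording(struct instance *tr, int val)
{
	tr->recording = val;          /* ring_buffer_record_on/off() stand-in */
	if (val) {
		if (tr->current_trace->start)
			tr->current_trace->start();
	} else {
		if (tr->current_trace->stop)
			tr->current_trace->stop();
	}
}

static void demo_start(void) { printf("start\n"); }

int main(void)
{
	struct tracer_m t = { "demo", demo_start, NULL };  /* no stop hook */
	struct instance tr = { 0, &t };
	set_recording(&tr, 1);        /* prints "start" */
	set_recording(&tr, 0);        /* stop is NULL: silently skipped */
	return 0;
}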
@@ -5019,6 +5098,38 @@ static const struct file_operations rb_simple_fops = { | |||
5019 | .llseek = default_llseek, | 5098 | .llseek = default_llseek, |
5020 | }; | 5099 | }; |
5021 | 5100 | ||
5101 | static void | ||
5102 | init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer) | ||
5103 | { | ||
5104 | |||
5105 | trace_create_file("trace_options", 0644, d_tracer, | ||
5106 | tr, &tracing_iter_fops); | ||
5107 | |||
5108 | trace_create_file("trace", 0644, d_tracer, | ||
5109 | (void *)&tr->trace_cpu, &tracing_fops); | ||
5110 | |||
5111 | trace_create_file("trace_pipe", 0444, d_tracer, | ||
5112 | (void *)&tr->trace_cpu, &tracing_pipe_fops); | ||
5113 | |||
5114 | trace_create_file("buffer_size_kb", 0644, d_tracer, | ||
5115 | (void *)&tr->trace_cpu, &tracing_entries_fops); | ||
5116 | |||
5117 | trace_create_file("buffer_total_size_kb", 0444, d_tracer, | ||
5118 | tr, &tracing_total_entries_fops); | ||
5119 | |||
5120 | trace_create_file("free_buffer", 0644, d_tracer, | ||
5121 | tr, &tracing_free_buffer_fops); | ||
5122 | |||
5123 | trace_create_file("trace_marker", 0220, d_tracer, | ||
5124 | tr, &tracing_mark_fops); | ||
5125 | |||
5126 | trace_create_file("trace_clock", 0644, d_tracer, tr, | ||
5127 | &trace_clock_fops); | ||
5128 | |||
5129 | trace_create_file("tracing_on", 0644, d_tracer, | ||
5130 | tr, &rb_simple_fops); | ||
5131 | } | ||
5132 | |||
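init_tracer_debugfs() gathers every per-instance file into one helper; tracer_init_debugfs() below becomes a thin wrapper that calls it for global_trace and then creates only the files that remain global (tracing_cpumask, available_tracers, README, and so on). A hypothetical sketch only, assuming a second trace_array and its directory could already exist, which this patch does not yet provide:

/* Hypothetical future caller: once instances can be created, wiring up
 * a new instance's debugfs files reduces to a single call. */
static void setup_instance_files(struct trace_array *tr,
				 struct dentry *instance_dir)
{
	init_tracer_debugfs(tr, instance_dir);
}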
5022 | static __init int tracer_init_debugfs(void) | 5133 | static __init int tracer_init_debugfs(void) |
5023 | { | 5134 | { |
5024 | struct dentry *d_tracer; | 5135 | struct dentry *d_tracer; |
@@ -5028,14 +5139,10 @@ static __init int tracer_init_debugfs(void) | |||
5028 | 5139 | ||
5029 | d_tracer = tracing_init_dentry(); | 5140 | d_tracer = tracing_init_dentry(); |
5030 | 5141 | ||
5031 | trace_create_file("trace_options", 0644, d_tracer, | 5142 | init_tracer_debugfs(&global_trace, d_tracer); |
5032 | NULL, &tracing_iter_fops); | ||
5033 | 5143 | ||
5034 | trace_create_file("tracing_cpumask", 0644, d_tracer, | 5144 | trace_create_file("tracing_cpumask", 0644, d_tracer, |
5035 | NULL, &tracing_cpumask_fops); | 5145 | &global_trace, &tracing_cpumask_fops); |
5036 | |||
5037 | trace_create_file("trace", 0644, d_tracer, | ||
5038 | (void *) RING_BUFFER_ALL_CPUS, &tracing_fops); | ||
5039 | 5146 | ||
5040 | trace_create_file("available_tracers", 0444, d_tracer, | 5147 | trace_create_file("available_tracers", 0444, d_tracer, |
5041 | &global_trace, &show_traces_fops); | 5148 | &global_trace, &show_traces_fops); |
@@ -5054,30 +5161,9 @@ static __init int tracer_init_debugfs(void) | |||
5054 | trace_create_file("README", 0444, d_tracer, | 5161 | trace_create_file("README", 0444, d_tracer, |
5055 | NULL, &tracing_readme_fops); | 5162 | NULL, &tracing_readme_fops); |
5056 | 5163 | ||
5057 | trace_create_file("trace_pipe", 0444, d_tracer, | ||
5058 | (void *) RING_BUFFER_ALL_CPUS, &tracing_pipe_fops); | ||
5059 | |||
5060 | trace_create_file("buffer_size_kb", 0644, d_tracer, | ||
5061 | (void *) RING_BUFFER_ALL_CPUS, &tracing_entries_fops); | ||
5062 | |||
5063 | trace_create_file("buffer_total_size_kb", 0444, d_tracer, | ||
5064 | &global_trace, &tracing_total_entries_fops); | ||
5065 | |||
5066 | trace_create_file("free_buffer", 0644, d_tracer, | ||
5067 | &global_trace, &tracing_free_buffer_fops); | ||
5068 | |||
5069 | trace_create_file("trace_marker", 0220, d_tracer, | ||
5070 | NULL, &tracing_mark_fops); | ||
5071 | |||
5072 | trace_create_file("saved_cmdlines", 0444, d_tracer, | 5164 | trace_create_file("saved_cmdlines", 0444, d_tracer, |
5073 | NULL, &tracing_saved_cmdlines_fops); | 5165 | NULL, &tracing_saved_cmdlines_fops); |
5074 | 5166 | ||
5075 | trace_create_file("trace_clock", 0644, d_tracer, NULL, | ||
5076 | &trace_clock_fops); | ||
5077 | |||
5078 | trace_create_file("tracing_on", 0644, d_tracer, | ||
5079 | &global_trace, &rb_simple_fops); | ||
5080 | |||
5081 | #ifdef CONFIG_DYNAMIC_FTRACE | 5167 | #ifdef CONFIG_DYNAMIC_FTRACE |
5082 | trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, | 5168 | trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, |
5083 | &ftrace_update_tot_cnt, &tracing_dyn_info_fops); | 5169 | &ftrace_update_tot_cnt, &tracing_dyn_info_fops); |
@@ -5085,13 +5171,13 @@ static __init int tracer_init_debugfs(void) | |||
5085 | 5171 | ||
5086 | #ifdef CONFIG_TRACER_SNAPSHOT | 5172 | #ifdef CONFIG_TRACER_SNAPSHOT |
5087 | trace_create_file("snapshot", 0644, d_tracer, | 5173 | trace_create_file("snapshot", 0644, d_tracer, |
5088 | (void *) RING_BUFFER_ALL_CPUS, &snapshot_fops); | 5174 | (void *)&global_trace.trace_cpu, &snapshot_fops); |
5089 | #endif | 5175 | #endif |
5090 | 5176 | ||
5091 | create_trace_options_dir(); | 5177 | create_trace_options_dir(&global_trace); |
5092 | 5178 | ||
5093 | for_each_tracing_cpu(cpu) | 5179 | for_each_tracing_cpu(cpu) |
5094 | tracing_init_debugfs_percpu(cpu); | 5180 | tracing_init_debugfs_percpu(&global_trace, cpu); |
5095 | 5181 | ||
5096 | return 0; | 5182 | return 0; |
5097 | } | 5183 | } |
@@ -5161,7 +5247,7 @@ trace_printk_seq(struct trace_seq *s) | |||
5161 | void trace_init_global_iter(struct trace_iterator *iter) | 5247 | void trace_init_global_iter(struct trace_iterator *iter) |
5162 | { | 5248 | { |
5163 | iter->tr = &global_trace; | 5249 | iter->tr = &global_trace; |
5164 | iter->trace = current_trace; | 5250 | iter->trace = iter->tr->current_trace; |
5165 | iter->cpu_file = RING_BUFFER_ALL_CPUS; | 5251 | iter->cpu_file = RING_BUFFER_ALL_CPUS; |
5166 | } | 5252 | } |
5167 | 5253 | ||
@@ -5315,6 +5401,8 @@ __init static int tracer_alloc_buffers(void) | |||
5315 | cpumask_copy(tracing_buffer_mask, cpu_possible_mask); | 5401 | cpumask_copy(tracing_buffer_mask, cpu_possible_mask); |
5316 | cpumask_copy(tracing_cpumask, cpu_all_mask); | 5402 | cpumask_copy(tracing_cpumask, cpu_all_mask); |
5317 | 5403 | ||
5404 | raw_spin_lock_init(&global_trace.start_lock); | ||
5405 | |||
5318 | /* TODO: make the number of buffers hot pluggable with CPUS */ | 5406 | /* TODO: make the number of buffers hot pluggable with CPUS */ |
5319 | global_trace.buffer = ring_buffer_alloc(ring_buf_size, rb_flags); | 5407 | global_trace.buffer = ring_buffer_alloc(ring_buf_size, rb_flags); |
5320 | if (!global_trace.buffer) { | 5408 | if (!global_trace.buffer) { |
@@ -5328,6 +5416,7 @@ __init static int tracer_alloc_buffers(void) | |||
5328 | 5416 | ||
5329 | #ifdef CONFIG_TRACER_MAX_TRACE | 5417 | #ifdef CONFIG_TRACER_MAX_TRACE |
5330 | max_tr.buffer = ring_buffer_alloc(1, rb_flags); | 5418 | max_tr.buffer = ring_buffer_alloc(1, rb_flags); |
5419 | raw_spin_lock_init(&max_tr.start_lock); | ||
5331 | if (!max_tr.buffer) { | 5420 | if (!max_tr.buffer) { |
5332 | printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); | 5421 | printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); |
5333 | WARN_ON(1); | 5422 | WARN_ON(1); |
@@ -5339,7 +5428,11 @@ __init static int tracer_alloc_buffers(void) | |||
5339 | /* Allocate the first page for all buffers */ | 5428 | /* Allocate the first page for all buffers */ |
5340 | for_each_tracing_cpu(i) { | 5429 | for_each_tracing_cpu(i) { |
5341 | global_trace.data[i] = &per_cpu(global_trace_cpu, i); | 5430 | global_trace.data[i] = &per_cpu(global_trace_cpu, i); |
5431 | global_trace.data[i]->trace_cpu.cpu = i; | ||
5432 | global_trace.data[i]->trace_cpu.tr = &global_trace; | ||
5342 | max_tr.data[i] = &per_cpu(max_tr_data, i); | 5433 | max_tr.data[i] = &per_cpu(max_tr_data, i); |
5434 | max_tr.data[i]->trace_cpu.cpu = i; | ||
5435 | max_tr.data[i]->trace_cpu.tr = &max_tr; | ||
5343 | } | 5436 | } |
5344 | 5437 | ||
5345 | set_buffer_entries(&global_trace, | 5438 | set_buffer_entries(&global_trace, |
@@ -5353,6 +5446,8 @@ __init static int tracer_alloc_buffers(void) | |||
5353 | 5446 | ||
5354 | register_tracer(&nop_trace); | 5447 | register_tracer(&nop_trace); |
5355 | 5448 | ||
5449 | global_trace.current_trace = &nop_trace; | ||
5450 | |||
5356 | /* All seems OK, enable tracing */ | 5451 | /* All seems OK, enable tracing */ |
5357 | tracing_disabled = 0; | 5452 | tracing_disabled = 0; |
5358 | 5453 | ||
@@ -5363,6 +5458,10 @@ __init static int tracer_alloc_buffers(void) | |||
5363 | 5458 | ||
5364 | global_trace.flags = TRACE_ARRAY_FL_GLOBAL; | 5459 | global_trace.flags = TRACE_ARRAY_FL_GLOBAL; |
5365 | 5460 | ||
5461 | /* Holder for file callbacks */ | ||
5462 | global_trace.trace_cpu.cpu = RING_BUFFER_ALL_CPUS; | ||
5463 | global_trace.trace_cpu.tr = &global_trace; | ||
5464 | |||
5366 | INIT_LIST_HEAD(&global_trace.systems); | 5465 | INIT_LIST_HEAD(&global_trace.systems); |
5367 | INIT_LIST_HEAD(&global_trace.events); | 5466 | INIT_LIST_HEAD(&global_trace.events); |
5368 | list_add(&global_trace.list, &ftrace_trace_arrays); | 5467 | list_add(&global_trace.list, &ftrace_trace_arrays); |
@@ -5371,7 +5470,7 @@ __init static int tracer_alloc_buffers(void) | |||
5371 | char *option; | 5470 | char *option; |
5372 | 5471 | ||
5373 | option = strsep(&trace_boot_options, ","); | 5472 | option = strsep(&trace_boot_options, ","); |
5374 | trace_set_options(option); | 5473 | trace_set_options(&global_trace, option); |
5375 | } | 5474 | } |
5376 | 5475 | ||
5377 | return 0; | 5476 | return 0; |
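The "Holder for file callbacks" block gives global_trace a trace_cpu of its own with cpu = RING_BUFFER_ALL_CPUS, so the top-level trace and trace_pipe files can share file operations with their per_cpu/cpuN siblings and simply branch on the cpu field. A compilable model of that dispatch:

#include <stdio.h>

#define RING_BUFFER_ALL_CPUS (-1)

struct trace_cpu_m { int cpu; };      /* tr pointer elided for brevity */

static void show(struct trace_cpu_m *tc)
{
	if (tc->cpu == RING_BUFFER_ALL_CPUS)
		printf("iterate every cpu buffer\n");   /* top-level file */
	else
		printf("only cpu %d\n", tc->cpu);       /* per_cpu/cpuN file */
}

int main(void)
{
	struct trace_cpu_m all = { RING_BUFFER_ALL_CPUS };
	struct trace_cpu_m one = { 3 };
	show(&all);
	show(&one);
	return 0;
}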
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index da09a037abcd..b80fbcf70af4 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -127,12 +127,21 @@ enum trace_flag_type { | |||
127 | 127 | ||
128 | #define TRACE_BUF_SIZE 1024 | 128 | #define TRACE_BUF_SIZE 1024 |
129 | 129 | ||
130 | struct trace_array; | ||
131 | |||
132 | struct trace_cpu { | ||
133 | struct trace_array *tr; | ||
134 | struct dentry *dir; | ||
135 | int cpu; | ||
136 | }; | ||
137 | |||
130 | /* | 138 | /* |
131 | * The CPU trace array - it consists of thousands of trace entries | 139 | * The CPU trace array - it consists of thousands of trace entries |
132 | * plus some other descriptor data: (for example which task started | 140 | * plus some other descriptor data: (for example which task started |
133 | * the trace, etc.) | 141 | * the trace, etc.) |
134 | */ | 142 | */ |
135 | struct trace_array_cpu { | 143 | struct trace_array_cpu { |
144 | struct trace_cpu trace_cpu; | ||
136 | atomic_t disabled; | 145 | atomic_t disabled; |
137 | void *buffer_page; /* ring buffer spare */ | 146 | void *buffer_page; /* ring buffer spare */ |
138 | 147 | ||
@@ -151,6 +160,8 @@ struct trace_array_cpu { | |||
151 | char comm[TASK_COMM_LEN]; | 160 | char comm[TASK_COMM_LEN]; |
152 | }; | 161 | }; |
153 | 162 | ||
163 | struct tracer; | ||
164 | |||
154 | /* | 165 | /* |
155 | * The trace array - an array of per-CPU trace arrays. This is the | 166 | * The trace array - an array of per-CPU trace arrays. This is the |
156 | * highest level data structure that individual tracers deal with. | 167 | * highest level data structure that individual tracers deal with. |
@@ -161,9 +172,16 @@ struct trace_array { | |||
161 | struct list_head list; | 172 | struct list_head list; |
162 | int cpu; | 173 | int cpu; |
163 | int buffer_disabled; | 174 | int buffer_disabled; |
175 | struct trace_cpu trace_cpu; /* place holder */ | ||
176 | int stop_count; | ||
177 | int clock_id; | ||
178 | struct tracer *current_trace; | ||
164 | unsigned int flags; | 179 | unsigned int flags; |
165 | cycle_t time_start; | 180 | cycle_t time_start; |
181 | raw_spinlock_t start_lock; | ||
166 | struct dentry *dir; | 182 | struct dentry *dir; |
183 | struct dentry *options; | ||
184 | struct dentry *percpu_dir; | ||
167 | struct dentry *event_dir; | 185 | struct dentry *event_dir; |
168 | struct list_head systems; | 186 | struct list_head systems; |
169 | struct list_head events; | 187 | struct list_head events; |
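The trace.h side shows where the displaced state landed: stop_count, clock_id, current_trace, start_lock, and the cached options/percpu_dir dentries become members of struct trace_array, so every future instance carries an independent copy of what used to be file-scope state in trace.c. A small model of why that matters, loosely based on the nesting stop/start counters:

#include <stdio.h>

struct instance {               /* fields mirror the new trace_array members */
	int stop_count;
};

static void tracing_stop_m(struct instance *tr)  { tr->stop_count++; }
static void tracing_start_m(struct instance *tr)
{
	if (tr->stop_count > 0)
		tr->stop_count--;
}

int main(void)
{
	struct instance a = { 0 }, b = { 0 };
	tracing_stop_m(&a);          /* stopping a leaves b recording */
	printf("a=%d b=%d\n", a.stop_count, b.stop_count);  /* a=1 b=0 */
	tracing_start_m(&a);
	return 0;
}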
@@ -474,6 +492,7 @@ struct dentry *trace_create_file(const char *name, | |||
474 | void *data, | 492 | void *data, |
475 | const struct file_operations *fops); | 493 | const struct file_operations *fops); |
476 | 494 | ||
495 | struct dentry *tracing_init_dentry_tr(struct trace_array *tr); | ||
477 | struct dentry *tracing_init_dentry(void); | 496 | struct dentry *tracing_init_dentry(void); |
478 | 497 | ||
479 | struct ring_buffer_event; | 498 | struct ring_buffer_event; |
@@ -979,7 +998,7 @@ extern const char *__stop___trace_bprintk_fmt[]; | |||
979 | void trace_printk_init_buffers(void); | 998 | void trace_printk_init_buffers(void); |
980 | void trace_printk_start_comm(void); | 999 | void trace_printk_start_comm(void); |
981 | int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set); | 1000 | int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set); |
982 | int set_tracer_flag(unsigned int mask, int enabled); | 1001 | int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled); |
983 | 1002 | ||
984 | #undef FTRACE_ENTRY | 1003 | #undef FTRACE_ENTRY |
985 | #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ | 1004 | #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ |
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 443b25b43b4f..b3cf6bf308ef 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -561,8 +561,8 @@ static void __irqsoff_tracer_init(struct trace_array *tr) | |||
561 | save_flags = trace_flags; | 561 | save_flags = trace_flags; |
562 | 562 | ||
563 | /* non overwrite screws up the latency tracers */ | 563 | /* non overwrite screws up the latency tracers */ |
564 | set_tracer_flag(TRACE_ITER_OVERWRITE, 1); | 564 | set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1); |
565 | set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1); | 565 | set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1); |
566 | 566 | ||
567 | tracing_max_latency = 0; | 567 | tracing_max_latency = 0; |
568 | irqsoff_trace = tr; | 568 | irqsoff_trace = tr; |
@@ -581,8 +581,8 @@ static void irqsoff_tracer_reset(struct trace_array *tr) | |||
581 | 581 | ||
582 | stop_irqsoff_tracer(tr, is_graph()); | 582 | stop_irqsoff_tracer(tr, is_graph()); |
583 | 583 | ||
584 | set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag); | 584 | set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag); |
585 | set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag); | 585 | set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag); |
586 | } | 586 | } |
587 | 587 | ||
588 | static void irqsoff_tracer_start(struct trace_array *tr) | 588 | static void irqsoff_tracer_start(struct trace_array *tr) |
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index fde652c9a511..5255a8477247 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -543,8 +543,8 @@ static int __wakeup_tracer_init(struct trace_array *tr) | |||
543 | save_flags = trace_flags; | 543 | save_flags = trace_flags; |
544 | 544 | ||
545 | /* non overwrite screws up the latency tracers */ | 545 | /* non overwrite screws up the latency tracers */ |
546 | set_tracer_flag(TRACE_ITER_OVERWRITE, 1); | 546 | set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1); |
547 | set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1); | 547 | set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1); |
548 | 548 | ||
549 | tracing_max_latency = 0; | 549 | tracing_max_latency = 0; |
550 | wakeup_trace = tr; | 550 | wakeup_trace = tr; |
@@ -573,8 +573,8 @@ static void wakeup_tracer_reset(struct trace_array *tr) | |||
573 | /* make sure we put back any tasks we are tracing */ | 573 | /* make sure we put back any tasks we are tracing */ |
574 | wakeup_reset(tr); | 574 | wakeup_reset(tr); |
575 | 575 | ||
576 | set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag); | 576 | set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag); |
577 | set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag); | 577 | set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag); |
578 | } | 578 | } |
579 | 579 | ||
580 | static void wakeup_tracer_start(struct trace_array *tr) | 580 | static void wakeup_tracer_start(struct trace_array *tr) |
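Both latency tracers follow the same pattern: save the user's flag bits at init, force OVERWRITE and LATENCY_FMT on, and restore the saved bits at reset, now always against the tr being set up or torn down. The save/restore idiom in a compilable model (flag names shortened, instance plumbing elided):

#include <stdio.h>

#define OVERWRITE   (1u << 0)
#define LATENCY_FMT (1u << 1)

static unsigned int trace_flags = LATENCY_FMT;   /* pretend user setting */
static unsigned int save_flags;

static void set_flag(unsigned int mask, int on)
{
	if (on)
		trace_flags |= mask;
	else
		trace_flags &= ~mask;
}

int main(void)
{
	save_flags = trace_flags;                    /* remember user's bits */
	set_flag(OVERWRITE, 1);                      /* force both on at init */
	set_flag(LATENCY_FMT, 1);

	/* ... tracer runs ... */

	set_flag(LATENCY_FMT, !!(save_flags & LATENCY_FMT));  /* restore at reset */
	set_flag(OVERWRITE,   !!(save_flags & OVERWRITE));
	printf("restored flags = %u\n", trace_flags);         /* back to 2 */
	return 0;
}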