diff options
Diffstat (limited to 'kernel/trace')
 kernel/trace/trace.c              | 27 +++++++++++++++++++++++++--
 kernel/trace/trace_sched_switch.c | 24 ------------------------
 2 files changed, 25 insertions(+), 26 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 0567f51bbea4..583fe24903d3 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -941,6 +941,30 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	trace_wake_up();
 }
 
+void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
+{
+	struct trace_array *tr = &global_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+
+	if (tracing_disabled || current_trace == &no_tracer || !tr->ctrl)
+		return;
+
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+
+	if (likely(disabled == 1))
+		__trace_special(tr, data, arg1, arg2, arg3);
+
+	atomic_dec(&data->disabled);
+	local_irq_restore(flags);
+}
+
 #ifdef CONFIG_FTRACE
 static void
 function_trace_call(unsigned long ip, unsigned long parent_ip)
@@ -2941,8 +2965,6 @@ __init static int tracer_alloc_buffers(void)
 	int ret = -ENOMEM;
 	int i;
 
-	global_trace.ctrl = tracer_enabled;
-
 	/* TODO: make the number of buffers hot pluggable with CPUS */
 	tracing_nr_buffers = num_possible_cpus();
 	tracing_buffer_mask = cpu_possible_map;
@@ -3012,6 +3034,7 @@ __init static int tracer_alloc_buffers(void)
 	current_trace = &no_tracer;
 
 	/* All seems OK, enable tracing */
+	global_trace.ctrl = tracer_enabled;
 	tracing_disabled = 0;
 
 	return 0;
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index d25ffa5eaf2b..798ec0dc863c 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -125,30 +125,6 @@ wake_up_callback(void *probe_data, void *call_data,
 	wakeup_func(probe_data, __rq, task, curr);
 }
 
-void
-ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
-{
-	struct trace_array *tr = ctx_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	long disabled;
-	int cpu;
-
-	if (!tracer_enabled)
-		return;
-
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
-
-	if (likely(disabled == 1))
-		__trace_special(tr, data, arg1, arg2, arg3);
-
-	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
-}
-
 static void sched_switch_reset(struct trace_array *tr)
 {
 	int cpu;