author    Ingo Molnar <mingo@elte.hu>    2008-07-14 09:58:35 -0400
committer Ingo Molnar <mingo@elte.hu>    2008-07-14 09:58:35 -0400
commit    6712e299b7dc78aa4971b85e803435ee6d49a9dd (patch)
tree      b3d17a2d068737ec07727b28e93c7d374c27721b /kernel/trace
parent    ec1bb60bbff0386c3ec25360e7a8c72f467a6ff1 (diff)
parent    b2613e370dbeb69edbff989382fa54f2395aa471 (diff)
Merge branch 'tracing/ftrace' into auto-ftrace-next
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/ftrace.c              17
-rw-r--r--  kernel/trace/trace.c               24
-rw-r--r--  kernel/trace/trace.h               10
-rw-r--r--  kernel/trace/trace_functions.c      3
-rw-r--r--  kernel/trace/trace_sched_switch.c   4
-rw-r--r--  kernel/trace/trace_sched_wakeup.c   3
6 files changed, 52 insertions, 9 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0f271c45cd02..4231a3dc224a 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1602,6 +1602,23 @@ core_initcall(ftrace_dynamic_init);
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 /**
+ * ftrace_kill_atomic - kill ftrace from critical sections
+ *
+ * This function should be used by panic code. It stops ftrace
+ * but in a not so nice way. If you need to simply kill ftrace
+ * from a non-atomic section, use ftrace_kill.
+ */
+void ftrace_kill_atomic(void)
+{
+	ftrace_disabled = 1;
+	ftrace_enabled = 0;
+#ifdef CONFIG_DYNAMIC_FTRACE
+	ftraced_suspend = -1;
+#endif
+	clear_ftrace_function();
+}
+
+/**
  * ftrace_kill - totally shutdown ftrace
  *
  * This is a safety measure. If something was detected that seems
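The new kernel-doc comment spells out the intended split between the two kill paths: ftrace_kill_atomic() for panic/critical sections, ftrace_kill() for ordinary context. A rough illustration only (not part of this merge; the caller names are hypothetical), assuming the declarations in linux/ftrace.h:

#include <linux/ftrace.h>

/* Hypothetical caller: stop function tracing from an atomic/panic path. */
static void example_halt_tracing_in_panic(void)
{
	ftrace_kill_atomic();	/* safe from critical sections */
}

/* Hypothetical caller: from ordinary, non-atomic context the plain
 * variant referenced in the comment above is sufficient. */
static void example_halt_tracing(void)
{
	ftrace_kill();
}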
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e46de641ea44..868e121c8e38 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -96,6 +96,9 @@ static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
 /* tracer_enabled is used to toggle activation of a tracer */
 static int tracer_enabled = 1;
 
+/* function tracing enabled */
+int ftrace_function_enabled;
+
 /*
  * trace_nr_entries is the number of entries that is allocated
  * for a buffer. Note, the number of entries is always rounded
@@ -134,6 +137,7 @@ static notrace void no_trace_init(struct trace_array *tr)
 {
 	int cpu;
 
+	ftrace_function_enabled = 0;
 	if(tr->ctrl)
 		for_each_online_cpu(cpu)
 			tracing_reset(tr->data[cpu]);
@@ -1027,7 +1031,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 	long disabled;
 	int cpu;
 
-	if (unlikely(!tracer_enabled))
+	if (unlikely(!ftrace_function_enabled))
 		return;
 
 	if (skip_trace(ip))
@@ -1052,11 +1056,15 @@ static struct ftrace_ops trace_ops __read_mostly =
 
 void tracing_start_function_trace(void)
 {
+	ftrace_function_enabled = 0;
 	register_ftrace_function(&trace_ops);
+	if (tracer_enabled)
+		ftrace_function_enabled = 1;
 }
 
 void tracing_stop_function_trace(void)
 {
+	ftrace_function_enabled = 0;
 	unregister_ftrace_function(&trace_ops);
 }
 #endif
@@ -1383,7 +1391,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 		"server",
 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
 		"desktop",
-#elif defined(CONFIG_PREEMPT_DESKTOP)
+#elif defined(CONFIG_PREEMPT)
 		"preempt",
 #else
 		"unknown",
@@ -1892,8 +1900,10 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
 	m->private = iter;
 
 	/* stop the trace while dumping */
-	if (iter->tr->ctrl)
+	if (iter->tr->ctrl) {
 		tracer_enabled = 0;
+		ftrace_function_enabled = 0;
+	}
 
 	if (iter->trace && iter->trace->open)
 		iter->trace->open(iter);
@@ -1926,8 +1936,14 @@ int tracing_release(struct inode *inode, struct file *file)
 		iter->trace->close(iter);
 
 	/* reenable tracing if it was previously enabled */
-	if (iter->tr->ctrl)
+	if (iter->tr->ctrl) {
 		tracer_enabled = 1;
+		/*
+		 * It is safe to enable function tracing even if it
+		 * isn't used
+		 */
+		ftrace_function_enabled = 1;
+	}
 	mutex_unlock(&trace_types_lock);
 
 	seq_release(inode, file);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 8cb215b239d5..f69f86788c2b 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -223,8 +223,6 @@ void trace_function(struct trace_array *tr,
 			    unsigned long parent_ip,
 			    unsigned long flags);
 
-void tracing_start_function_trace(void);
-void tracing_stop_function_trace(void);
 void tracing_start_cmdline_record(void);
 void tracing_stop_cmdline_record(void);
 int register_tracer(struct tracer *type);
@@ -241,6 +239,14 @@ void update_max_tr_single(struct trace_array *tr,
 
 extern cycle_t ftrace_now(int cpu);
 
+#ifdef CONFIG_FTRACE
+void tracing_start_function_trace(void);
+void tracing_stop_function_trace(void);
+#else
+# define tracing_start_function_trace() do { } while (0)
+# define tracing_stop_function_trace() do { } while (0)
+#endif
+
 #ifdef CONFIG_CONTEXT_SWITCH_TRACER
 typedef void
 (*tracer_switch_func_t)(void *private,
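The #else branch above is the usual stub pattern: with CONFIG_FTRACE disabled, the start/stop calls expand to empty do { } while (0) statements, so call sites compile unchanged either way. A minimal sketch (hypothetical caller, assuming only the declarations just added):

/* Hypothetical tracer start routine: no #ifdef CONFIG_FTRACE is needed
 * at the call site; the stub macro compiles the call away when the
 * option is off. */
static void example_tracer_start(void)
{
	tracing_start_cmdline_record();
	tracing_start_function_trace();	/* no-op when CONFIG_FTRACE=n */
}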
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 7ee7dcd76b7d..312144897970 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -28,7 +28,10 @@ static void function_reset(struct trace_array *tr)
 
 static void start_function_trace(struct trace_array *tr)
 {
+	tr->cpu = get_cpu();
 	function_reset(tr);
+	put_cpu();
+
 	tracing_start_cmdline_record();
 	tracing_start_function_trace();
 }
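The new tr->cpu assignment leans on the standard get_cpu()/put_cpu() pair: get_cpu() disables preemption and returns the current CPU id, so the task cannot migrate while function_reset() runs, and put_cpu() re-enables preemption afterwards. A stripped-down sketch of the idiom (illustration only, not code from this merge):

#include <linux/smp.h>

/* Hypothetical helper showing the idiom used above. */
static int example_stable_cpu_id(void)
{
	int cpu = get_cpu();	/* preemption off: cpu id stays valid */

	/* ... work that must observe a stable CPU id ... */

	put_cpu();		/* preemption back on */
	return cpu;
}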
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 93a662009151..cb817a209aa0 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -227,14 +227,14 @@ void tracing_stop_cmdline_record(void)
 static void start_sched_trace(struct trace_array *tr)
 {
 	sched_switch_reset(tr);
-	tracer_enabled = 1;
 	tracing_start_cmdline_record();
+	tracer_enabled = 1;
 }
 
 static void stop_sched_trace(struct trace_array *tr)
 {
-	tracing_stop_cmdline_record();
 	tracer_enabled = 0;
+	tracing_stop_cmdline_record();
 }
 
 static void sched_switch_trace_init(struct trace_array *tr)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index bf7e91caef57..3c8d61df4474 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -352,9 +352,10 @@ static void start_wakeup_tracer(struct trace_array *tr)
 	 */
 	smp_wmb();
 
-	tracer_enabled = 1;
 	register_ftrace_function(&trace_ops);
 
+	tracer_enabled = 1;
+
 	return;
 fail_deprobe_wake_new:
 	marker_probe_unregister("kernel_sched_wakeup_new",