diff options
Diffstat (limited to 'kernel/trace')
-rw-r--r--   kernel/trace/Kconfig                 11
-rw-r--r--   kernel/trace/Makefile                 1
-rw-r--r--   kernel/trace/trace_sched_switch.c   116
3 files changed, 128 insertions, 0 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 1399f372b5dc..5d6aa92866cd 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig | |||
@@ -13,6 +13,7 @@ config FTRACE | |||
13 | depends on DEBUG_KERNEL && HAVE_FTRACE | 13 | depends on DEBUG_KERNEL && HAVE_FTRACE |
14 | select FRAME_POINTER | 14 | select FRAME_POINTER |
15 | select TRACING | 15 | select TRACING |
16 | select CONTEXT_SWITCH_TRACER | ||
16 | help | 17 | help |
17 | Enable the kernel to trace every kernel function. This is done | 18 | Enable the kernel to trace every kernel function. This is done |
18 | by using a compiler feature to insert a small, 5-byte No-Operation | 19 | by using a compiler feature to insert a small, 5-byte No-Operation |
@@ -21,3 +22,13 @@ config FTRACE | |||
21 | tracing is enabled by the administrator. If it's runtime disabled | 22 | tracing is enabled by the administrator. If it's runtime disabled |
22 | (the bootup default), then the overhead of the instructions is very | 23 | (the bootup default), then the overhead of the instructions is very |
23 | small and not measurable even in micro-benchmarks. | 24 | small and not measurable even in micro-benchmarks. |
25 | |||
26 | config CONTEXT_SWITCH_TRACER | ||
27 | bool "Trace process context switches" | ||
28 | depends on DEBUG_KERNEL | ||
29 | select TRACING | ||
30 | select MARKERS | ||
31 | help | ||
32 | This tracer gets called from the context switch and records | ||
33 | all switching of tasks. | ||
34 | |||
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index 6bb5e50b4a40..6b54ceb7f16e 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile | |||
@@ -1,6 +1,7 @@ | |||
1 | obj-$(CONFIG_FTRACE) += libftrace.o | 1 | obj-$(CONFIG_FTRACE) += libftrace.o |
2 | 2 | ||
3 | obj-$(CONFIG_TRACING) += trace.o | 3 | obj-$(CONFIG_TRACING) += trace.o |
4 | obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o | ||
4 | obj-$(CONFIG_FTRACE) += trace_functions.o | 5 | obj-$(CONFIG_FTRACE) += trace_functions.o |
5 | 6 | ||
6 | libftrace-y := ftrace.o | 7 | libftrace-y := ftrace.o |
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c new file mode 100644 index 000000000000..3e4771d3b890 --- /dev/null +++ b/kernel/trace/trace_sched_switch.c | |||
@@ -0,0 +1,116 @@ | |||
1 | /* | ||
2 | * trace context switch | ||
3 | * | ||
4 | * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com> | ||
5 | * | ||
6 | */ | ||
7 | #include <linux/module.h> | ||
8 | #include <linux/fs.h> | ||
9 | #include <linux/debugfs.h> | ||
10 | #include <linux/kallsyms.h> | ||
11 | #include <linux/uaccess.h> | ||
12 | #include <linux/marker.h> | ||
13 | #include <linux/ftrace.h> | ||
14 | |||
15 | #include "trace.h" | ||
16 | |||
/* The trace_array this tracer records into; set by sched_switch_trace_init(). */
static struct trace_array *ctx_trace;
/* Nonzero while recording is active; checked on every context switch. */
static int __read_mostly tracer_enabled;
19 | |||
/*
 * ctx_switch_func - record a single context switch into the trace buffer.
 *
 * Called from ftrace_ctx_switch() on every task switch, so it is marked
 * notrace (must not recurse into the tracer) and bails out cheaply when
 * tracing is disabled.
 */
static void notrace
ctx_switch_func(struct task_struct *prev, struct task_struct *next)
{
	struct trace_array *tr = ctx_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

	/*
	 * Disable interrupts before reading the CPU id so we cannot be
	 * migrated between picking the id and touching that CPU's data.
	 */
	raw_local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	/*
	 * Per-cpu reentrancy guard: only the first entry on this CPU
	 * (disabled == 1) actually records; nested entries are dropped.
	 */
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		tracing_sched_switch_trace(tr, data, prev, next, flags);

	atomic_dec(&data->disabled);
	raw_local_irq_restore(flags);
}
43 | |||
/*
 * ftrace_ctx_switch - hook invoked by the scheduler on every task switch.
 *
 * Records the outgoing task (presumably for later pid -> comm resolution
 * by tracing_record_cmdline() -- confirm against its definition), then
 * hands the switch to the local tracer and to the wakeup tracer.
 */
void ftrace_ctx_switch(struct task_struct *prev, struct task_struct *next)
{
	tracing_record_cmdline(prev);

	/*
	 * If tracer_switch_func only points to the local
	 * switch func, it still needs the ptr passed to it.
	 */
	ctx_switch_func(prev, next);

	/*
	 * Chain to the wakeup tracer (this is a NOP if disabled):
	 */
	wakeup_sched_switch(prev, next);
}
59 | |||
60 | static notrace void sched_switch_reset(struct trace_array *tr) | ||
61 | { | ||
62 | int cpu; | ||
63 | |||
64 | tr->time_start = now(tr->cpu); | ||
65 | |||
66 | for_each_online_cpu(cpu) | ||
67 | tracing_reset(tr->data[cpu]); | ||
68 | } | ||
69 | |||
/*
 * Begin recording into @tr.  The buffers are reset before tracer_enabled
 * is set so no stale events from a previous run survive the restart.
 */
static notrace void start_sched_trace(struct trace_array *tr)
{
	sched_switch_reset(tr);
	tracer_enabled = 1;
}
75 | |||
/* Stop recording; already-buffered events are left intact for readout. */
static notrace void stop_sched_trace(struct trace_array *tr)
{
	tracer_enabled = 0;
}
80 | |||
81 | static notrace void sched_switch_trace_init(struct trace_array *tr) | ||
82 | { | ||
83 | ctx_trace = tr; | ||
84 | |||
85 | if (tr->ctrl) | ||
86 | start_sched_trace(tr); | ||
87 | } | ||
88 | |||
89 | static notrace void sched_switch_trace_reset(struct trace_array *tr) | ||
90 | { | ||
91 | if (tr->ctrl) | ||
92 | stop_sched_trace(tr); | ||
93 | } | ||
94 | |||
95 | static void sched_switch_trace_ctrl_update(struct trace_array *tr) | ||
96 | { | ||
97 | /* When starting a new trace, reset the buffers */ | ||
98 | if (tr->ctrl) | ||
99 | start_sched_trace(tr); | ||
100 | else | ||
101 | stop_sched_trace(tr); | ||
102 | } | ||
103 | |||
/*
 * Tracer descriptor handed to the tracing core; the core calls back
 * into the init/reset/ctrl_update hooks defined above.
 */
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.ctrl_update	= sched_switch_trace_ctrl_update,
};
111 | |||
112 | __init static int init_sched_switch_trace(void) | ||
113 | { | ||
114 | return register_tracer(&sched_switch_trace); | ||
115 | } | ||
116 | device_initcall(init_sched_switch_trace); | ||