Diffstat (limited to 'kernel/trace/trace_sched_switch.c'):
 kernel/trace/trace_sched_switch.c | 116 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 116 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
new file mode 100644
index 000000000000..3e4771d3b890
--- /dev/null
+++ b/kernel/trace/trace_sched_switch.c
@@ -0,0 +1,116 @@
+/*
+ * trace context switch
+ *
+ * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
+ *
+ */
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/kallsyms.h>
+#include <linux/uaccess.h>
+#include <linux/marker.h>
+#include <linux/ftrace.h>
+
+#include "trace.h"
+
+static struct trace_array	*ctx_trace;
+static int __read_mostly	tracer_enabled;
+
+static void notrace
+ctx_switch_func(struct task_struct *prev, struct task_struct *next)
+{
+	struct trace_array *tr = ctx_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+
+	if (!tracer_enabled)
+		return;
+
+	raw_local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+
+	if (likely(disabled == 1))
+		tracing_sched_switch_trace(tr, data, prev, next, flags);
+
+	atomic_dec(&data->disabled);
+	raw_local_irq_restore(flags);
+}
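/*
 * Annotation (not part of the patch): atomic_inc_return() on
 * data->disabled acts as a per-CPU reentrancy guard.  Only the
 * outermost caller on a given CPU sees the counter at 1 and records
 * the event; a nested call triggered from within the tracing code
 * itself would see a larger value and fall through to atomic_dec().
 * The raw_local_irq_save() and raw_smp_processor_id() variants are
 * used so the guard does not itself recurse into the instrumented
 * versions of those helpers.
 */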
+
+void ftrace_ctx_switch(struct task_struct *prev, struct task_struct *next)
+{
+	tracing_record_cmdline(prev);
+
+	/*
+	 * If tracer_switch_func only points to the local
+	 * switch func, it still needs the ptr passed to it.
+	 */
+	ctx_switch_func(prev, next);
+
+	/*
+	 * Chain to the wakeup tracer (this is a NOP if disabled):
+	 */
+	wakeup_sched_switch(prev, next);
+}
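/*
 * Annotation (not part of the patch): tracing_record_cmdline() saves
 * the pid-to-comm mapping of the outgoing task so that trace output
 * can print task names instead of bare pids.  wakeup_sched_switch()
 * feeds the same event to the wakeup-latency tracer; per the comment
 * above, it is a NOP while that tracer is disabled.
 */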
+
+static notrace void sched_switch_reset(struct trace_array *tr)
+{
+	int cpu;
+
+	tr->time_start = now(tr->cpu);
+
+	for_each_online_cpu(cpu)
+		tracing_reset(tr->data[cpu]);
+}
+
+static notrace void start_sched_trace(struct trace_array *tr)
+{
+	sched_switch_reset(tr);
+	tracer_enabled = 1;
+}
+
+static notrace void stop_sched_trace(struct trace_array *tr)
+{
+	tracer_enabled = 0;
+}
+
+static notrace void sched_switch_trace_init(struct trace_array *tr)
+{
+	ctx_trace = tr;
+
+	if (tr->ctrl)
+		start_sched_trace(tr);
+}
+
+static notrace void sched_switch_trace_reset(struct trace_array *tr)
+{
+	if (tr->ctrl)
+		stop_sched_trace(tr);
+}
+
+static void sched_switch_trace_ctrl_update(struct trace_array *tr)
+{
+	/* When starting a new trace, reset the buffers */
+	if (tr->ctrl)
+		start_sched_trace(tr);
+	else
+		stop_sched_trace(tr);
+}
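/*
 * Annotation (not part of the patch): tr->ctrl is flipped by the
 * tracing core when the user toggles the global trace control file in
 * debugfs (tracing_enabled in kernels of this era; the file itself is
 * defined outside this diff).  Note the asymmetry: starting goes
 * through sched_switch_reset() so a new trace begins from empty
 * per-CPU buffers, while stopping leaves the buffers intact so the
 * captured trace can still be read afterwards.
 */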
+
+static struct tracer sched_switch_trace __read_mostly =
+{
+	.name		= "sched_switch",
+	.init		= sched_switch_trace_init,
+	.reset		= sched_switch_trace_reset,
+	.ctrl_update	= sched_switch_trace_ctrl_update,
+};
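/*
 * Annotation (not part of the patch): .name is the handle by which the
 * user selects this tracer at run time, by writing "sched_switch" to
 * the current_tracer debugfs file (the control files live in
 * kernel/trace/trace.c, outside this diff).  The core then calls
 * .init, and .reset when switching to another tracer.
 */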
+
+__init static int init_sched_switch_trace(void)
+{
+	return register_tracer(&sched_switch_trace);
+}
+device_initcall(init_sched_switch_trace);
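
For context: ftrace_ctx_switch() is the only entry point called from outside
this file, and the diffstat above is limited to trace_sched_switch.c, so the
caller is not shown here. A minimal sketch of the assumed scheduler-side hook
(hypothetical placement and helper names, loosely modeled on the ftrace
patches of this period, not part of this diff):

	/* kernel/sched.c (sketch, assumed caller) */
	static inline void
	context_switch(struct rq *rq, struct task_struct *prev,
		       struct task_struct *next)
	{
		prepare_task_switch(rq, next);
		/* log the switch while both task_structs are still live */
		ftrace_ctx_switch(prev, next);
		/* ... mm handover, then switch_to(prev, next, prev) ... */
	}

Recording before switch_to() means the event is logged in the context of the
outgoing task, while both prev and next are guaranteed valid.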