path: root/kernel/trace/trace_sched_switch.c
author    Ingo Molnar <mingo@elte.hu>  2008-12-31 02:31:57 -0500
committer Ingo Molnar <mingo@elte.hu>  2008-12-31 02:31:57 -0500
commit    a9de18eb761f7c1c860964b2e5addc1a35c7e861 (patch)
tree      886e75fdfd09690cd262ca69cb7f5d1d42b48602 /kernel/trace/trace_sched_switch.c
parent    b2aaf8f74cdc84a9182f6cabf198b7763bcb9d40 (diff)
parent    6a94cb73064c952255336cc57731904174b2c58f (diff)
Merge branch 'linus' into stackprotector

Conflicts:
        arch/x86/include/asm/pda.h
        kernel/fork.c
Diffstat (limited to 'kernel/trace/trace_sched_switch.c')
-rw-r--r--  kernel/trace/trace_sched_switch.c  252
1 file changed, 103 insertions(+), 149 deletions(-)
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index cb817a209aa0..df175cb4564f 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -9,25 +9,27 @@
 #include <linux/debugfs.h>
 #include <linux/kallsyms.h>
 #include <linux/uaccess.h>
-#include <linux/marker.h>
 #include <linux/ftrace.h>
+#include <trace/sched.h>
 
 #include "trace.h"
 
 static struct trace_array *ctx_trace;
 static int __read_mostly tracer_enabled;
-static atomic_t sched_ref;
+static int sched_ref;
+static DEFINE_MUTEX(sched_register_mutex);
 
 static void
-sched_switch_func(void *private, void *__rq, struct task_struct *prev,
+probe_sched_switch(struct rq *__rq, struct task_struct *prev,
                         struct task_struct *next)
 {
-        struct trace_array **ptr = private;
-        struct trace_array *tr = *ptr;
         struct trace_array_cpu *data;
         unsigned long flags;
-        long disabled;
         int cpu;
+        int pc;
+
+        if (!sched_ref)
+                return;
 
         tracing_record_cmdline(prev);
         tracing_record_cmdline(next);
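
Annotation: this hunk starts the move from the old kernel markers (string-format probes that re-parse their arguments from a va_list, see sched_switch_callback removed in the next hunk) to typed tracepoints from <trace/sched.h>. As a rough orientation sketch only, assuming the 2.6.28-era tracepoint API (TPPROTO/TPARGS were the spellings of the day, later renamed TP_PROTO/TP_ARGS), the declaration such a probe attaches to looks approximately like:

        /* Sketch, not verbatim kernel source: DECLARE_TRACE() generates
         * trace_sched_switch() plus typed register_trace_sched_switch()/
         * unregister_trace_sched_switch() helpers, which is why the probes
         * in this file can take struct rq * and struct task_struct *
         * directly instead of walking a va_list. */
        DECLARE_TRACE(sched_switch,
                TPPROTO(struct rq *rq, struct task_struct *prev,
                        struct task_struct *next),
                TPARGS(rq, prev, next));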
@@ -35,183 +37,95 @@ sched_switch_func(void *private, void *__rq, struct task_struct *prev,
         if (!tracer_enabled)
                 return;
 
+        pc = preempt_count();
         local_irq_save(flags);
         cpu = raw_smp_processor_id();
-        data = tr->data[cpu];
-        disabled = atomic_inc_return(&data->disabled);
+        data = ctx_trace->data[cpu];
 
-        if (likely(disabled == 1))
-                tracing_sched_switch_trace(tr, data, prev, next, flags);
+        if (likely(!atomic_read(&data->disabled)))
+                tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);
 
-        atomic_dec(&data->disabled);
         local_irq_restore(flags);
 }
 
-static notrace void
-sched_switch_callback(void *probe_data, void *call_data,
-                      const char *format, va_list *args)
-{
-        struct task_struct *prev;
-        struct task_struct *next;
-        struct rq *__rq;
-
-        if (!atomic_read(&sched_ref))
-                return;
-
-        /* skip prev_pid %d next_pid %d prev_state %ld */
-        (void)va_arg(*args, int);
-        (void)va_arg(*args, int);
-        (void)va_arg(*args, long);
-        __rq = va_arg(*args, typeof(__rq));
-        prev = va_arg(*args, typeof(prev));
-        next = va_arg(*args, typeof(next));
-
-        /*
-         * If tracer_switch_func only points to the local
-         * switch func, it still needs the ptr passed to it.
-         */
-        sched_switch_func(probe_data, __rq, prev, next);
-}
-
 static void
-wakeup_func(void *private, void *__rq, struct task_struct *wakee, struct
-                        task_struct *curr)
+probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
 {
-        struct trace_array **ptr = private;
-        struct trace_array *tr = *ptr;
         struct trace_array_cpu *data;
         unsigned long flags;
-        long disabled;
-        int cpu;
+        int cpu, pc;
 
-        if (!tracer_enabled)
+        if (!likely(tracer_enabled))
                 return;
 
-        tracing_record_cmdline(curr);
+        pc = preempt_count();
+        tracing_record_cmdline(current);
 
         local_irq_save(flags);
         cpu = raw_smp_processor_id();
-        data = tr->data[cpu];
-        disabled = atomic_inc_return(&data->disabled);
+        data = ctx_trace->data[cpu];
 
-        if (likely(disabled == 1))
-                tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
+        if (likely(!atomic_read(&data->disabled)))
+                tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
+                                           flags, pc);
 
-        atomic_dec(&data->disabled);
         local_irq_restore(flags);
 }
 
-static notrace void
-wake_up_callback(void *probe_data, void *call_data,
-                 const char *format, va_list *args)
-{
-        struct task_struct *curr;
-        struct task_struct *task;
-        struct rq *__rq;
-
-        if (likely(!tracer_enabled))
-                return;
-
-        /* Skip pid %d state %ld */
-        (void)va_arg(*args, int);
-        (void)va_arg(*args, long);
-        /* now get the meat: "rq %p task %p rq->curr %p" */
-        __rq = va_arg(*args, typeof(__rq));
-        task = va_arg(*args, typeof(task));
-        curr = va_arg(*args, typeof(curr));
-
-        tracing_record_cmdline(task);
-        tracing_record_cmdline(curr);
-
-        wakeup_func(probe_data, __rq, task, curr);
-}
-
-static void sched_switch_reset(struct trace_array *tr)
-{
-        int cpu;
-
-        tr->time_start = ftrace_now(tr->cpu);
-
-        for_each_online_cpu(cpu)
-                tracing_reset(tr->data[cpu]);
-}
-
 static int tracing_sched_register(void)
 {
         int ret;
 
-        ret = marker_probe_register("kernel_sched_wakeup",
-                        "pid %d state %ld ## rq %p task %p rq->curr %p",
-                        wake_up_callback,
-                        &ctx_trace);
+        ret = register_trace_sched_wakeup(probe_sched_wakeup);
         if (ret) {
-                pr_info("wakeup trace: Couldn't add marker"
+                pr_info("wakeup trace: Couldn't activate tracepoint"
                         " probe to kernel_sched_wakeup\n");
                 return ret;
         }
 
-        ret = marker_probe_register("kernel_sched_wakeup_new",
-                        "pid %d state %ld ## rq %p task %p rq->curr %p",
-                        wake_up_callback,
-                        &ctx_trace);
+        ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
         if (ret) {
-                pr_info("wakeup trace: Couldn't add marker"
+                pr_info("wakeup trace: Couldn't activate tracepoint"
                         " probe to kernel_sched_wakeup_new\n");
                 goto fail_deprobe;
         }
 
-        ret = marker_probe_register("kernel_sched_schedule",
-                "prev_pid %d next_pid %d prev_state %ld "
-                "## rq %p prev %p next %p",
-                sched_switch_callback,
-                &ctx_trace);
+        ret = register_trace_sched_switch(probe_sched_switch);
         if (ret) {
-                pr_info("sched trace: Couldn't add marker"
+                pr_info("sched trace: Couldn't activate tracepoint"
                         " probe to kernel_sched_schedule\n");
                 goto fail_deprobe_wake_new;
         }
 
         return ret;
 fail_deprobe_wake_new:
-        marker_probe_unregister("kernel_sched_wakeup_new",
-                                wake_up_callback,
-                                &ctx_trace);
+        unregister_trace_sched_wakeup_new(probe_sched_wakeup);
 fail_deprobe:
-        marker_probe_unregister("kernel_sched_wakeup",
-                                wake_up_callback,
-                                &ctx_trace);
+        unregister_trace_sched_wakeup(probe_sched_wakeup);
         return ret;
 }
 
 static void tracing_sched_unregister(void)
 {
-        marker_probe_unregister("kernel_sched_schedule",
-                                sched_switch_callback,
-                                &ctx_trace);
-        marker_probe_unregister("kernel_sched_wakeup_new",
-                                wake_up_callback,
-                                &ctx_trace);
-        marker_probe_unregister("kernel_sched_wakeup",
-                                wake_up_callback,
-                                &ctx_trace);
+        unregister_trace_sched_switch(probe_sched_switch);
+        unregister_trace_sched_wakeup_new(probe_sched_wakeup);
+        unregister_trace_sched_wakeup(probe_sched_wakeup);
 }
 
 static void tracing_start_sched_switch(void)
 {
-        long ref;
-
-        ref = atomic_inc_return(&sched_ref);
-        if (ref == 1)
+        mutex_lock(&sched_register_mutex);
+        if (!(sched_ref++))
                 tracing_sched_register();
+        mutex_unlock(&sched_register_mutex);
 }
 
 static void tracing_stop_sched_switch(void)
 {
-        long ref;
-
-        ref = atomic_dec_and_test(&sched_ref);
-        if (ref)
+        mutex_lock(&sched_register_mutex);
+        if (!(--sched_ref))
                 tracing_sched_unregister();
+        mutex_unlock(&sched_register_mutex);
 }
 
 void tracing_start_cmdline_record(void)
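
Annotation: the tracing_start/stop_sched_switch() rewrite above trades the atomic_t counter for a plain int guarded by sched_register_mutex, so the 0->1 registration and the 1->0 teardown are serialized with the (un)register calls themselves. A minimal sketch of the first-user/last-user idiom this implements (the get_/put_ helper names are hypothetical, for illustration only):

        static int sched_ref;
        static DEFINE_MUTEX(sched_register_mutex);

        static void get_sched_probes(void)              /* hypothetical name */
        {
                mutex_lock(&sched_register_mutex);
                if (!sched_ref++)                       /* 0 -> 1: first user */
                        tracing_sched_register();
                mutex_unlock(&sched_register_mutex);
        }

        static void put_sched_probes(void)              /* hypothetical name */
        {
                mutex_lock(&sched_register_mutex);
                if (!--sched_ref)                       /* 1 -> 0: last user */
                        tracing_sched_unregister();
                mutex_unlock(&sched_register_mutex);
        }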
@@ -224,40 +138,86 @@ void tracing_stop_cmdline_record(void)
         tracing_stop_sched_switch();
 }
 
+/**
+ * tracing_start_sched_switch_record - start tracing context switches
+ *
+ * Turns on context switch tracing for a tracer.
+ */
+void tracing_start_sched_switch_record(void)
+{
+        if (unlikely(!ctx_trace)) {
+                WARN_ON(1);
+                return;
+        }
+
+        tracing_start_sched_switch();
+
+        mutex_lock(&sched_register_mutex);
+        tracer_enabled++;
+        mutex_unlock(&sched_register_mutex);
+}
+
+/**
+ * tracing_stop_sched_switch_record - stop tracing context switches
+ *
+ * Turns off context switch tracing for a tracer.
+ */
+void tracing_stop_sched_switch_record(void)
+{
+        mutex_lock(&sched_register_mutex);
+        tracer_enabled--;
+        WARN_ON(tracer_enabled < 0);
+        mutex_unlock(&sched_register_mutex);
+
+        tracing_stop_sched_switch();
+}
+
+/**
+ * tracing_sched_switch_assign_trace - assign a trace array for ctx switch
+ * @tr: trace array pointer to assign
+ *
+ * Some tracers might want to record the context switches in their
+ * trace. This function lets those tracers assign the trace array
+ * to use.
+ */
+void tracing_sched_switch_assign_trace(struct trace_array *tr)
+{
+        ctx_trace = tr;
+}
+
 static void start_sched_trace(struct trace_array *tr)
 {
-        sched_switch_reset(tr);
-        tracing_start_cmdline_record();
-        tracer_enabled = 1;
+        tracing_reset_online_cpus(tr);
+        tracing_start_sched_switch_record();
 }
 
 static void stop_sched_trace(struct trace_array *tr)
 {
-        tracer_enabled = 0;
-        tracing_stop_cmdline_record();
+        tracing_stop_sched_switch_record();
 }
 
-static void sched_switch_trace_init(struct trace_array *tr)
+static int sched_switch_trace_init(struct trace_array *tr)
 {
         ctx_trace = tr;
-
-        if (tr->ctrl)
-                start_sched_trace(tr);
+        start_sched_trace(tr);
+        return 0;
 }
 
 static void sched_switch_trace_reset(struct trace_array *tr)
 {
-        if (tr->ctrl)
+        if (sched_ref)
                 stop_sched_trace(tr);
 }
 
-static void sched_switch_trace_ctrl_update(struct trace_array *tr)
+static void sched_switch_trace_start(struct trace_array *tr)
 {
-        /* When starting a new trace, reset the buffers */
-        if (tr->ctrl)
-                start_sched_trace(tr);
-        else
-                stop_sched_trace(tr);
+        tracing_reset_online_cpus(tr);
+        tracing_start_sched_switch();
+}
+
+static void sched_switch_trace_stop(struct trace_array *tr)
+{
+        tracing_stop_sched_switch();
 }
 
 static struct tracer sched_switch_trace __read_mostly =
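
Annotation: the kernel-doc added above names the intended consumer: another tracer hands its trace_array to this file, then brackets its own lifetime with the record start/stop calls. A hedged sketch of such a caller (my_tracer_* is hypothetical):

        /* Hypothetical consumer of the API added in this hunk. */
        static int my_tracer_init(struct trace_array *tr)
        {
                tracing_sched_switch_assign_trace(tr);  /* record into tr */
                tracing_start_sched_switch_record();    /* bumps tracer_enabled */
                return 0;
        }

        static void my_tracer_reset(struct trace_array *tr)
        {
                tracing_stop_sched_switch_record();
        }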
@@ -265,7 +225,8 @@ static struct tracer sched_switch_trace __read_mostly =
         .name           = "sched_switch",
         .init           = sched_switch_trace_init,
         .reset          = sched_switch_trace_reset,
-        .ctrl_update    = sched_switch_trace_ctrl_update,
+        .start          = sched_switch_trace_start,
+        .stop           = sched_switch_trace_stop,
 #ifdef CONFIG_FTRACE_SELFTEST
         .selftest       = trace_selftest_startup_sched_switch,
 #endif
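
Annotation: the single .ctrl_update hook (toggled via tr->ctrl) is replaced by separate .start/.stop callbacks. For orientation, the relevant fields of struct tracer after this change look roughly like the abridged sketch below; the real definition lives in kernel/trace/trace.h of this tree.

        /* Abridged sketch of struct tracer, not the full definition. */
        struct tracer {
                const char      *name;
                int             (*init)(struct trace_array *tr);
                void            (*reset)(struct trace_array *tr);
                void            (*start)(struct trace_array *tr);
                void            (*stop)(struct trace_array *tr);
                /* ... more hooks elided ... */
        };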
@@ -273,14 +234,7 @@ static struct tracer sched_switch_trace __read_mostly =
 
 __init static int init_sched_switch_trace(void)
 {
-        int ret = 0;
-
-        if (atomic_read(&sched_ref))
-                ret = tracing_sched_register();
-        if (ret) {
-                pr_info("error registering scheduler trace\n");
-                return ret;
-        }
         return register_tracer(&sched_switch_trace);
 }
 device_initcall(init_sched_switch_trace);
+