author    Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>    2008-05-12 15:21:10 -0400
committer Thomas Gleixner <tglx@linutronix.de>                2008-05-23 16:29:25 -0400
commit    5b82a1b08a00b2adca3d9dd9777efff40b7aaaa1 (patch)
tree      4dcce4af592ca177bee3dfeb34f9b482d142e713
parent    0aa977f592f17004f9d1d545f2e1bb9ea71896c3 (diff)
Port ftrace to markers
Porting ftrace to the marker infrastructure.

Don't need to chain to the wakeup tracer from the sched tracer, because
markers support multiple probes connected.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
CC: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
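The point about multiple probes is what makes removing the chaining safe: with markers, any number of probes can attach to one instrumentation site, so the sched tracer and the wakeup tracer can each register against the same kernel_sched_* markers independently. Below is a minimal sketch of that pattern against the 2.6.26-era <linux/marker.h> API (requires CONFIG_MARKERS); the demo_event marker and the probe_a/probe_b module are hypothetical, while trace_mark(), marker_probe_register() and marker_probe_unregister() are the calls the patch itself uses:

#include <linux/module.h>
#include <linux/marker.h>

/* Instrumentation site: one marker; the format string documents the args. */
static void demo_hit(int pid, long state)
{
        trace_mark(demo_event, "pid %d state %ld", pid, state);
}

/* First probe: consumes the va_list in format-string order. */
static void probe_a(void *probe_data, void *call_data,
                    const char *format, va_list *args)
{
        int pid = va_arg(*args, int);
        long state = va_arg(*args, long);

        pr_info("probe_a: pid=%d state=%ld\n", pid, state);
}

/* Second, independent probe on the same marker: no chaining required. */
static void probe_b(void *probe_data, void *call_data,
                    const char *format, va_list *args)
{
        (void)va_arg(*args, int);               /* skip pid */
        pr_info("probe_b: state=%ld\n", va_arg(*args, long));
}

static int __init demo_init(void)
{
        int ret;

        ret = marker_probe_register("demo_event", "pid %d state %ld",
                                    probe_a, NULL);
        if (ret)
                return ret;
        ret = marker_probe_register("demo_event", "pid %d state %ld",
                                    probe_b, NULL);
        if (ret) {
                marker_probe_unregister("demo_event", probe_a, NULL);
                return ret;
        }
        demo_hit(1, 0);         /* both probes fire from this one call */
        return 0;
}

static void __exit demo_exit(void)
{
        marker_probe_unregister("demo_event", probe_b, NULL);
        marker_probe_unregister("demo_event", probe_a, NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Because each tracer registers its own probe, the teardown paths below (tracing_sched_unregister(), stop_wakeup_tracer()) can detach their probes independently of each other.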
-rw-r--r--  include/linux/sched.h             |  32 ------
-rw-r--r--  kernel/sched.c                    |  14 ++-
-rw-r--r--  kernel/trace/trace.h              |  20 +---
-rw-r--r--  kernel/trace/trace_sched_switch.c | 171 ++++++++++++++++++-----
-rw-r--r--  kernel/trace/trace_sched_wakeup.c | 106 +++++++++++---
5 files changed, 255 insertions(+), 88 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 360ca99033d2..c0b1c69b55ce 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2131,38 +2131,6 @@ __trace_special(void *__tr, void *__data,
 }
 #endif
 
-#ifdef CONFIG_CONTEXT_SWITCH_TRACER
-extern void
-ftrace_ctx_switch(void *rq, struct task_struct *prev, struct task_struct *next);
-extern void
-ftrace_wake_up_task(void *rq, struct task_struct *wakee,
-                    struct task_struct *curr);
-extern void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data);
-extern void
-ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
-#else
-static inline void
-ftrace_ctx_switch(void *rq, struct task_struct *prev, struct task_struct *next)
-{
-}
-static inline void
-sched_trace_special(unsigned long p1, unsigned long p2, unsigned long p3)
-{
-}
-static inline void
-ftrace_wake_up_task(void *rq, struct task_struct *wakee,
-                    struct task_struct *curr)
-{
-}
-static inline void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data)
-{
-}
-static inline void
-ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
-{
-}
-#endif
-
 extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
 extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
 
diff --git a/kernel/sched.c b/kernel/sched.c
index ad95cca4e42e..e2e985eeee78 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2500,7 +2500,9 @@ out_activate:
         success = 1;
 
 out_running:
-        ftrace_wake_up_task(rq, p, rq->curr);
+        trace_mark(kernel_sched_wakeup,
+                   "pid %d state %ld ## rq %p task %p rq->curr %p",
+                   p->pid, p->state, rq, p, rq->curr);
         check_preempt_curr(rq, p);
 
         p->state = TASK_RUNNING;
@@ -2631,7 +2633,9 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
                 p->sched_class->task_new(rq, p);
                 inc_nr_running(rq);
         }
-        ftrace_wake_up_task(rq, p, rq->curr);
+        trace_mark(kernel_sched_wakeup_new,
+                   "pid %d state %ld ## rq %p task %p rq->curr %p",
+                   p->pid, p->state, rq, p, rq->curr);
         check_preempt_curr(rq, p);
 #ifdef CONFIG_SMP
         if (p->sched_class->task_wake_up)
@@ -2804,7 +2808,11 @@ context_switch(struct rq *rq, struct task_struct *prev,
         struct mm_struct *mm, *oldmm;
 
         prepare_task_switch(rq, prev, next);
-        ftrace_ctx_switch(rq, prev, next);
+        trace_mark(kernel_sched_schedule,
+                   "prev_pid %d next_pid %d prev_state %ld "
+                   "## rq %p prev %p next %p",
+                   prev->pid, next->pid, prev->state,
+                   rq, prev, next);
         mm = next->mm;
         oldmm = prev->active_mm;
         /*
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 8845033ab49d..f5de0601b408 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -234,25 +234,10 @@ void update_max_tr_single(struct trace_array *tr,
 
 extern cycle_t ftrace_now(int cpu);
 
-#ifdef CONFIG_SCHED_TRACER
-extern void
-wakeup_sched_switch(struct task_struct *prev, struct task_struct *next);
-extern void
-wakeup_sched_wakeup(struct task_struct *wakee, struct task_struct *curr);
-#else
-static inline void
-wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
-{
-}
-static inline void
-wakeup_sched_wakeup(struct task_struct *wakee, struct task_struct *curr)
-{
-}
-#endif
-
 #ifdef CONFIG_CONTEXT_SWITCH_TRACER
 typedef void
 (*tracer_switch_func_t)(void *private,
+                        void *__rq,
                         struct task_struct *prev,
                         struct task_struct *next);
 
@@ -262,9 +247,6 @@ struct tracer_switch_ops {
         struct tracer_switch_ops *next;
 };
 
-extern int register_tracer_switch(struct tracer_switch_ops *ops);
-extern int unregister_tracer_switch(struct tracer_switch_ops *ops);
-
 #endif /* CONFIG_CONTEXT_SWITCH_TRACER */
 
 #ifdef CONFIG_DYNAMIC_FTRACE
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index a3376478fc2c..d25ffa5eaf2b 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -16,11 +16,14 @@
 
 static struct trace_array *ctx_trace;
 static int __read_mostly tracer_enabled;
+static atomic_t sched_ref;
 
 static void
-ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
+sched_switch_func(void *private, void *__rq, struct task_struct *prev,
+                  struct task_struct *next)
 {
-        struct trace_array *tr = ctx_trace;
+        struct trace_array **ptr = private;
+        struct trace_array *tr = *ptr;
         struct trace_array_cpu *data;
         unsigned long flags;
         long disabled;
@@ -41,10 +44,40 @@ ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
         local_irq_restore(flags);
 }
 
+static notrace void
+sched_switch_callback(void *probe_data, void *call_data,
+                      const char *format, va_list *args)
+{
+        struct task_struct *prev;
+        struct task_struct *next;
+        struct rq *__rq;
+
+        if (!atomic_read(&sched_ref))
+                return;
+
+        /* skip prev_pid %d next_pid %d prev_state %ld */
+        (void)va_arg(*args, int);
+        (void)va_arg(*args, int);
+        (void)va_arg(*args, long);
+        __rq = va_arg(*args, typeof(__rq));
+        prev = va_arg(*args, typeof(prev));
+        next = va_arg(*args, typeof(next));
+
+        tracing_record_cmdline(prev);
+
+        /*
+         * If tracer_switch_func only points to the local
+         * switch func, it still needs the ptr passed to it.
+         */
+        sched_switch_func(probe_data, __rq, prev, next);
+}
+
 static void
-wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
+wakeup_func(void *private, void *__rq, struct task_struct *wakee, struct
+            task_struct *curr)
 {
-        struct trace_array *tr = ctx_trace;
+        struct trace_array **ptr = private;
+        struct trace_array *tr = *ptr;
         struct trace_array_cpu *data;
         unsigned long flags;
         long disabled;
@@ -67,35 +100,29 @@ wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
         local_irq_restore(flags);
 }
 
-void
-ftrace_ctx_switch(void *__rq, struct task_struct *prev,
-                  struct task_struct *next)
+static notrace void
+wake_up_callback(void *probe_data, void *call_data,
+                 const char *format, va_list *args)
 {
-        if (unlikely(atomic_read(&trace_record_cmdline_enabled)))
-                tracing_record_cmdline(prev);
+        struct task_struct *curr;
+        struct task_struct *task;
+        struct rq *__rq;
 
-        /*
-         * If tracer_switch_func only points to the local
-         * switch func, it still needs the ptr passed to it.
-         */
-        ctx_switch_func(__rq, prev, next);
+        if (likely(!tracer_enabled))
+                return;
 
-        /*
-         * Chain to the wakeup tracer (this is a NOP if disabled):
-         */
-        wakeup_sched_switch(prev, next);
-}
+        /* Skip pid %d state %ld */
+        (void)va_arg(*args, int);
+        (void)va_arg(*args, long);
+        /* now get the meat: "rq %p task %p rq->curr %p" */
+        __rq = va_arg(*args, typeof(__rq));
+        task = va_arg(*args, typeof(task));
+        curr = va_arg(*args, typeof(curr));
 
-void
-ftrace_wake_up_task(void *__rq, struct task_struct *wakee,
-                    struct task_struct *curr)
-{
-        wakeup_func(__rq, wakee, curr);
+        tracing_record_cmdline(task);
+        tracing_record_cmdline(curr);
 
-        /*
-         * Chain to the wakeup tracer (this is a NOP if disabled):
-         */
-        wakeup_sched_wakeup(wakee, curr);
+        wakeup_func(probe_data, __rq, task, curr);
 }
 
 void
@@ -132,15 +159,95 @@ static void sched_switch_reset(struct trace_array *tr)
                 tracing_reset(tr->data[cpu]);
 }
 
+static int tracing_sched_register(void)
+{
+        int ret;
+
+        ret = marker_probe_register("kernel_sched_wakeup",
+                        "pid %d state %ld ## rq %p task %p rq->curr %p",
+                        wake_up_callback,
+                        &ctx_trace);
+        if (ret) {
+                pr_info("wakeup trace: Couldn't add marker"
+                        " probe to kernel_sched_wakeup\n");
+                return ret;
+        }
+
+        ret = marker_probe_register("kernel_sched_wakeup_new",
+                        "pid %d state %ld ## rq %p task %p rq->curr %p",
+                        wake_up_callback,
+                        &ctx_trace);
+        if (ret) {
+                pr_info("wakeup trace: Couldn't add marker"
+                        " probe to kernel_sched_wakeup_new\n");
+                goto fail_deprobe;
+        }
+
+        ret = marker_probe_register("kernel_sched_schedule",
+                        "prev_pid %d next_pid %d prev_state %ld "
+                        "## rq %p prev %p next %p",
+                        sched_switch_callback,
+                        &ctx_trace);
+        if (ret) {
+                pr_info("sched trace: Couldn't add marker"
+                        " probe to kernel_sched_schedule\n");
+                goto fail_deprobe_wake_new;
+        }
+
+        return ret;
+fail_deprobe_wake_new:
+        marker_probe_unregister("kernel_sched_wakeup_new",
+                        wake_up_callback,
+                        &ctx_trace);
+fail_deprobe:
+        marker_probe_unregister("kernel_sched_wakeup",
+                        wake_up_callback,
+                        &ctx_trace);
+        return ret;
+}
+
+static void tracing_sched_unregister(void)
+{
+        marker_probe_unregister("kernel_sched_schedule",
+                        sched_switch_callback,
+                        &ctx_trace);
+        marker_probe_unregister("kernel_sched_wakeup_new",
+                        wake_up_callback,
+                        &ctx_trace);
+        marker_probe_unregister("kernel_sched_wakeup",
+                        wake_up_callback,
+                        &ctx_trace);
+}
+
+void tracing_start_sched_switch(void)
+{
+        long ref;
+
+        ref = atomic_inc_return(&sched_ref);
+        if (ref == 1)
+                tracing_sched_register();
+}
+
+void tracing_stop_sched_switch(void)
+{
+        long ref;
+
+        ref = atomic_dec_and_test(&sched_ref);
+        if (ref)
+                tracing_sched_unregister();
+}
+
 static void start_sched_trace(struct trace_array *tr)
 {
         sched_switch_reset(tr);
         atomic_inc(&trace_record_cmdline_enabled);
         tracer_enabled = 1;
+        tracing_start_sched_switch();
 }
 
 static void stop_sched_trace(struct trace_array *tr)
 {
+        tracing_stop_sched_switch();
         atomic_dec(&trace_record_cmdline_enabled);
         tracer_enabled = 0;
 }
@@ -181,6 +288,14 @@ static struct tracer sched_switch_trace __read_mostly =
 
 __init static int init_sched_switch_trace(void)
 {
+        int ret = 0;
+
+        if (atomic_read(&sched_ref))
+                ret = tracing_sched_register();
+        if (ret) {
+                pr_info("error registering scheduler trace\n");
+                return ret;
+        }
         return register_tracer(&sched_switch_trace);
 }
 device_initcall(init_sched_switch_trace);
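The tracing_start_sched_switch()/tracing_stop_sched_switch() pair added above follows a register-on-first-user, unregister-on-last-user idiom: the marker probes are attached only while at least one tracer needs them. A userspace analogue of that idiom, compilable as-is (hypothetical demo code; C11 atomics stand in for the kernel's atomic_t helpers):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int sched_ref;

static void tracing_sched_register(void)   { puts("probes registered"); }
static void tracing_sched_unregister(void) { puts("probes unregistered"); }

static void tracing_start_sched_switch(void)
{
        /* atomic_fetch_add returns the old value: 0 means first user */
        if (atomic_fetch_add(&sched_ref, 1) == 0)
                tracing_sched_register();
}

static void tracing_stop_sched_switch(void)
{
        /* atomic_fetch_sub returns the old value: 1 means last user */
        if (atomic_fetch_sub(&sched_ref, 1) == 1)
                tracing_sched_unregister();
}

int main(void)
{
        tracing_start_sched_switch();   /* first user: registers */
        tracing_start_sched_switch();   /* second user: no-op */
        tracing_stop_sched_switch();    /* one user remains: no-op */
        tracing_stop_sched_switch();    /* last user: unregisters */
        return 0;
}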
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 5948011006bc..5d2fb48e47f8 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -15,6 +15,7 @@
 #include <linux/kallsyms.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
+#include <linux/marker.h>
 
 #include "trace.h"
 
@@ -44,11 +45,13 @@ static int report_latency(cycle_t delta)
         return 1;
 }
 
-void
-wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
+static void notrace
+wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
+                    struct task_struct *next)
 {
         unsigned long latency = 0, t0 = 0, t1 = 0;
-        struct trace_array *tr = wakeup_trace;
+        struct trace_array **ptr = private;
+        struct trace_array *tr = *ptr;
         struct trace_array_cpu *data;
         cycle_t T0, T1, delta;
         unsigned long flags;
@@ -113,6 +116,31 @@ out:
         atomic_dec(&tr->data[cpu]->disabled);
 }
 
+static notrace void
+sched_switch_callback(void *probe_data, void *call_data,
+                      const char *format, va_list *args)
+{
+        struct task_struct *prev;
+        struct task_struct *next;
+        struct rq *__rq;
+
+        /* skip prev_pid %d next_pid %d prev_state %ld */
+        (void)va_arg(*args, int);
+        (void)va_arg(*args, int);
+        (void)va_arg(*args, long);
+        __rq = va_arg(*args, typeof(__rq));
+        prev = va_arg(*args, typeof(prev));
+        next = va_arg(*args, typeof(next));
+
+        tracing_record_cmdline(prev);
+
+        /*
+         * If tracer_switch_func only points to the local
+         * switch func, it still needs the ptr passed to it.
+         */
+        wakeup_sched_switch(probe_data, __rq, prev, next);
+}
+
 static void __wakeup_reset(struct trace_array *tr)
 {
         struct trace_array_cpu *data;
@@ -188,19 +216,68 @@ out:
         atomic_dec(&tr->data[cpu]->disabled);
 }
 
-void wakeup_sched_wakeup(struct task_struct *wakee, struct task_struct *curr)
+static notrace void
+wake_up_callback(void *probe_data, void *call_data,
+                 const char *format, va_list *args)
 {
+        struct trace_array **ptr = probe_data;
+        struct trace_array *tr = *ptr;
+        struct task_struct *curr;
+        struct task_struct *task;
+        struct rq *__rq;
+
         if (likely(!tracer_enabled))
                 return;
 
+        /* Skip pid %d state %ld */
+        (void)va_arg(*args, int);
+        (void)va_arg(*args, long);
+        /* now get the meat: "rq %p task %p rq->curr %p" */
+        __rq = va_arg(*args, typeof(__rq));
+        task = va_arg(*args, typeof(task));
+        curr = va_arg(*args, typeof(curr));
+
+        tracing_record_cmdline(task);
         tracing_record_cmdline(curr);
-        tracing_record_cmdline(wakee);
 
-        wakeup_check_start(wakeup_trace, wakee, curr);
+        wakeup_check_start(tr, task, curr);
 }
 
 static void start_wakeup_tracer(struct trace_array *tr)
 {
+        int ret;
+
+        ret = marker_probe_register("kernel_sched_wakeup",
+                        "pid %d state %ld ## rq %p task %p rq->curr %p",
+                        wake_up_callback,
+                        &wakeup_trace);
+        if (ret) {
+                pr_info("wakeup trace: Couldn't add marker"
+                        " probe to kernel_sched_wakeup\n");
+                return;
+        }
+
+        ret = marker_probe_register("kernel_sched_wakeup_new",
+                        "pid %d state %ld ## rq %p task %p rq->curr %p",
+                        wake_up_callback,
+                        &wakeup_trace);
+        if (ret) {
+                pr_info("wakeup trace: Couldn't add marker"
+                        " probe to kernel_sched_wakeup_new\n");
+                goto fail_deprobe;
+        }
+
+        ret = marker_probe_register("kernel_sched_schedule",
+                        "prev_pid %d next_pid %d prev_state %ld "
+                        "## rq %p prev %p next %p",
+                        sched_switch_callback,
+                        &wakeup_trace);
+        if (ret) {
+                pr_info("sched trace: Couldn't add marker"
+                        " probe to kernel_sched_schedule\n");
+                goto fail_deprobe_wake_new;
+        }
+
         wakeup_reset(tr);
 
         /*
@@ -215,11 +292,28 @@ static void start_wakeup_tracer(struct trace_array *tr)
         tracer_enabled = 1;
 
         return;
+fail_deprobe_wake_new:
+        marker_probe_unregister("kernel_sched_wakeup_new",
+                        wake_up_callback,
+                        &wakeup_trace);
+fail_deprobe:
+        marker_probe_unregister("kernel_sched_wakeup",
+                        wake_up_callback,
+                        &wakeup_trace);
 }
 
 static void stop_wakeup_tracer(struct trace_array *tr)
 {
         tracer_enabled = 0;
+        marker_probe_unregister("kernel_sched_schedule",
+                        sched_switch_callback,
+                        &wakeup_trace);
+        marker_probe_unregister("kernel_sched_wakeup_new",
+                        wake_up_callback,
+                        &wakeup_trace);
+        marker_probe_unregister("kernel_sched_wakeup",
+                        wake_up_callback,
+                        &wakeup_trace);
 }
 
 static void wakeup_tracer_init(struct trace_array *tr)
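One contract worth making explicit: a marker probe receives the arguments through a va_list and must consume them in exactly the order of the format string, using throwaway va_arg() calls to skip fields it does not need; the "##" in the format strings above is only a naming convention separating the human-readable fields from the raw pointers, all of which are still present in the va_list. A standalone userspace illustration of that calling convention (hypothetical demo code, not from the patch; note the kernel probes receive a va_list * rather than being variadic themselves):

#include <stdarg.h>
#include <stdio.h>

static void probe(const char *format, ...)
{
        va_list args;
        void *rq, *prev, *next;

        va_start(args, format);
        /* skip "prev_pid %d next_pid %d prev_state %ld" */
        (void)va_arg(args, int);
        (void)va_arg(args, int);
        (void)va_arg(args, long);
        /* the "## rq %p prev %p next %p" part: raw pointers after the ## */
        rq   = va_arg(args, void *);
        prev = va_arg(args, void *);
        next = va_arg(args, void *);
        va_end(args);

        printf("rq=%p prev=%p next=%p\n", rq, prev, next);
}

int main(void)
{
        int dummy_rq = 0, dummy_prev = 0, dummy_next = 0;

        probe("prev_pid %d next_pid %d prev_state %ld ## rq %p prev %p next %p",
              1, 2, 0L, &dummy_rq, &dummy_prev, &dummy_next);
        return 0;
}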