author	Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>	2008-07-18 12:16:17 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-10-14 04:32:26 -0400
commit	b07c3f193a8074aa4afe43cfa8ae38ec4c7ccfa9 (patch)
tree	7d2b1d9efc5a8e24cb07c8d7f0b3e056fec8f150
parent	0a16b6075843325dc402edf80c1662838b929aff (diff)

ftrace: port to tracepoints
Port the trace_mark() calls used by ftrace to tracepoints. (cleanup)

Changelog:
- Change error messages: marker -> tracepoint

[ mingo@elte.hu: conflict resolutions ]

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	kernel/trace/trace_sched_switch.c	120
-rw-r--r--	kernel/trace/trace_sched_wakeup.c	135
2 files changed, 58 insertions(+), 197 deletions(-)
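The substance of the patch shows in the probe signatures below: a marker probe received a format string plus a va_list and had to peel each argument off by hand, while a tracepoint probe is a plain C function with a typed signature, so the compiler checks the arguments and all the va_arg bookkeeping disappears. As illustration only, here is a small user-space sketch of the two callback shapes; struct task, fire_marker() and the literal argument order are hypothetical stand-ins mirroring the pattern in the diff, not the kernel marker/tracepoint API:

#include <stdarg.h>
#include <stdio.h>

struct task { int pid; };

/* Marker style: one untyped callback; arguments are recovered from a
 * va_list in the order promised by the format string. A mismatch
 * compiles fine and corrupts the trace at run time. */
static void wake_up_callback(void *probe_data, va_list *args)
{
	(void)probe_data;
	(void)va_arg(*args, int);	/* skip pid */
	(void)va_arg(*args, long);	/* skip state */
	struct task *t = va_arg(*args, struct task *);
	printf("marker-style wakeup: pid %d\n", t->pid);
}

/* Tracepoint style: a typed probe; a signature mismatch is a compile error. */
static void probe_sched_wakeup(struct task *t)
{
	printf("tracepoint-style wakeup: pid %d\n", t->pid);
}

/* Hypothetical dispatcher playing the role of the old marker site. */
static void fire_marker(void (*cb)(void *, va_list *), void *data, ...)
{
	va_list ap;

	va_start(ap, data);
	cb(data, &ap);
	va_end(ap);
}

int main(void)
{
	struct task t = { .pid = 42 };

	fire_marker(wake_up_callback, NULL, t.pid, 0L, &t);
	probe_sched_wakeup(&t);
	return 0;
}

This is also why the diff can drop the `struct trace_array **ptr = private` indirection: the new probes reach their trace_array through the file-local ctx_trace/wakeup_trace variables instead of a probe_data pointer.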
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index cb817a209aa0..789e927abc9c 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -9,8 +9,8 @@
 #include <linux/debugfs.h>
 #include <linux/kallsyms.h>
 #include <linux/uaccess.h>
-#include <linux/marker.h>
 #include <linux/ftrace.h>
+#include <trace/sched.h>
 
 #include "trace.h"
 
@@ -19,16 +19,17 @@ static int __read_mostly tracer_enabled;
 static atomic_t			sched_ref;
 
 static void
-sched_switch_func(void *private, void *__rq, struct task_struct *prev,
+probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 			struct task_struct *next)
 {
-	struct trace_array **ptr = private;
-	struct trace_array *tr = *ptr;
 	struct trace_array_cpu *data;
 	unsigned long flags;
 	long disabled;
 	int cpu;
 
+	if (!atomic_read(&sched_ref))
+		return;
+
 	tracing_record_cmdline(prev);
 	tracing_record_cmdline(next);
 
@@ -37,95 +38,42 @@ sched_switch_func(void *private, void *__rq, struct task_struct *prev,
 
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
+	data = ctx_trace->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1))
-		tracing_sched_switch_trace(tr, data, prev, next, flags);
+		tracing_sched_switch_trace(ctx_trace, data, prev, next, flags);
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
 
-static notrace void
-sched_switch_callback(void *probe_data, void *call_data,
-		      const char *format, va_list *args)
-{
-	struct task_struct *prev;
-	struct task_struct *next;
-	struct rq *__rq;
-
-	if (!atomic_read(&sched_ref))
-		return;
-
-	/* skip prev_pid %d next_pid %d prev_state %ld */
-	(void)va_arg(*args, int);
-	(void)va_arg(*args, int);
-	(void)va_arg(*args, long);
-	__rq = va_arg(*args, typeof(__rq));
-	prev = va_arg(*args, typeof(prev));
-	next = va_arg(*args, typeof(next));
-
-	/*
-	 * If tracer_switch_func only points to the local
-	 * switch func, it still needs the ptr passed to it.
-	 */
-	sched_switch_func(probe_data, __rq, prev, next);
-}
-
 static void
-wakeup_func(void *private, void *__rq, struct task_struct *wakee, struct
-			task_struct *curr)
+probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
 {
-	struct trace_array **ptr = private;
-	struct trace_array *tr = *ptr;
 	struct trace_array_cpu *data;
 	unsigned long flags;
 	long disabled;
 	int cpu;
 
-	if (!tracer_enabled)
+	if (!likely(tracer_enabled))
 		return;
 
-	tracing_record_cmdline(curr);
+	tracing_record_cmdline(current);
 
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
+	data = ctx_trace->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1))
-		tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
+		tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
+					flags);
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
 
-static notrace void
-wake_up_callback(void *probe_data, void *call_data,
-		 const char *format, va_list *args)
-{
-	struct task_struct *curr;
-	struct task_struct *task;
-	struct rq *__rq;
-
-	if (likely(!tracer_enabled))
-		return;
-
-	/* Skip pid %d state %ld */
-	(void)va_arg(*args, int);
-	(void)va_arg(*args, long);
-	/* now get the meat: "rq %p task %p rq->curr %p" */
-	__rq = va_arg(*args, typeof(__rq));
-	task = va_arg(*args, typeof(task));
-	curr = va_arg(*args, typeof(curr));
-
-	tracing_record_cmdline(task);
-	tracing_record_cmdline(curr);
-
-	wakeup_func(probe_data, __rq, task, curr);
-}
-
 static void sched_switch_reset(struct trace_array *tr)
 {
 	int cpu;
@@ -140,60 +88,40 @@ static int tracing_sched_register(void)
 {
 	int ret;
 
-	ret = marker_probe_register("kernel_sched_wakeup",
-			"pid %d state %ld ## rq %p task %p rq->curr %p",
-			wake_up_callback,
-			&ctx_trace);
+	ret = register_trace_sched_wakeup(probe_sched_wakeup);
 	if (ret) {
-		pr_info("wakeup trace: Couldn't add marker"
+		pr_info("wakeup trace: Couldn't activate tracepoint"
 			" probe to kernel_sched_wakeup\n");
 		return ret;
 	}
 
-	ret = marker_probe_register("kernel_sched_wakeup_new",
-			"pid %d state %ld ## rq %p task %p rq->curr %p",
-			wake_up_callback,
-			&ctx_trace);
+	ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
 	if (ret) {
-		pr_info("wakeup trace: Couldn't add marker"
+		pr_info("wakeup trace: Couldn't activate tracepoint"
 			" probe to kernel_sched_wakeup_new\n");
 		goto fail_deprobe;
 	}
 
-	ret = marker_probe_register("kernel_sched_schedule",
-		"prev_pid %d next_pid %d prev_state %ld "
-		"## rq %p prev %p next %p",
-		sched_switch_callback,
-		&ctx_trace);
+	ret = register_trace_sched_switch(probe_sched_switch);
 	if (ret) {
-		pr_info("sched trace: Couldn't add marker"
+		pr_info("sched trace: Couldn't activate tracepoint"
 			" probe to kernel_sched_schedule\n");
 		goto fail_deprobe_wake_new;
 	}
 
 	return ret;
 fail_deprobe_wake_new:
-	marker_probe_unregister("kernel_sched_wakeup_new",
-				wake_up_callback,
-				&ctx_trace);
+	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
 fail_deprobe:
-	marker_probe_unregister("kernel_sched_wakeup",
-				wake_up_callback,
-				&ctx_trace);
+	unregister_trace_sched_wakeup(probe_sched_wakeup);
 	return ret;
 }
 
 static void tracing_sched_unregister(void)
 {
-	marker_probe_unregister("kernel_sched_schedule",
-				sched_switch_callback,
-				&ctx_trace);
-	marker_probe_unregister("kernel_sched_wakeup_new",
-				wake_up_callback,
-				&ctx_trace);
-	marker_probe_unregister("kernel_sched_wakeup",
-				wake_up_callback,
-				&ctx_trace);
+	unregister_trace_sched_switch(probe_sched_switch);
+	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
+	unregister_trace_sched_wakeup(probe_sched_wakeup);
 }
 
 static void tracing_start_sched_switch(void)
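Note how tracing_sched_register() keeps the usual kernel unwind idiom through the conversion: each successfully registered probe gains one unregister call on the failure path, executed in reverse order via the fail_* labels. A small user-space sketch of that shape, with hypothetical demo_register()/demo_unregister() helpers standing in for the register_trace_*()/unregister_trace_*() calls:

#include <stdio.h>

static int demo_register(const char *name)
{
	printf("register %s\n", name);
	return 0;			/* nonzero would signal failure */
}

static void demo_unregister(const char *name)
{
	printf("unregister %s\n", name);
}

/* Mirrors tracing_sched_register(): later failures unwind earlier successes. */
static int demo_sched_register(void)
{
	int ret;

	ret = demo_register("sched_wakeup");
	if (ret)
		return ret;

	ret = demo_register("sched_wakeup_new");
	if (ret)
		goto fail_deprobe;

	ret = demo_register("sched_switch");
	if (ret)
		goto fail_deprobe_wake_new;

	return 0;

fail_deprobe_wake_new:
	demo_unregister("sched_wakeup_new");
fail_deprobe:
	demo_unregister("sched_wakeup");
	return ret;
}

int main(void)
{
	return demo_sched_register();
}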
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index e303ccb62cdf..08206b4e29c4 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -15,7 +15,7 @@
 #include <linux/kallsyms.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
-#include <linux/marker.h>
+#include <trace/sched.h>
 
 #include "trace.h"
 
@@ -112,18 +112,18 @@ static int report_latency(cycle_t delta)
 }
 
 static void notrace
-wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
+probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
 	struct task_struct *next)
 {
 	unsigned long latency = 0, t0 = 0, t1 = 0;
-	struct trace_array **ptr = private;
-	struct trace_array *tr = *ptr;
 	struct trace_array_cpu *data;
 	cycle_t T0, T1, delta;
 	unsigned long flags;
 	long disabled;
 	int cpu;
 
+	tracing_record_cmdline(prev);
+
 	if (unlikely(!tracer_enabled))
 		return;
 
@@ -140,11 +140,11 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
 		return;
 
 	/* The task we are waiting for is waking up */
-	data = tr->data[wakeup_cpu];
+	data = wakeup_trace->data[wakeup_cpu];
 
 	/* disable local data, not wakeup_cpu data */
 	cpu = raw_smp_processor_id();
-	disabled = atomic_inc_return(&tr->data[cpu]->disabled);
+	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
 	if (likely(disabled != 1))
 		goto out;
 
@@ -155,7 +155,7 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
 	if (unlikely(!tracer_enabled || next != wakeup_task))
 		goto out_unlock;
 
-	trace_function(tr, data, CALLER_ADDR1, CALLER_ADDR2, flags);
+	trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags);
 
 	/*
 	 * usecs conversion is slow so we try to delay the conversion
@@ -174,39 +174,14 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
 	t0 = nsecs_to_usecs(T0);
 	t1 = nsecs_to_usecs(T1);
 
-	update_max_tr(tr, wakeup_task, wakeup_cpu);
+	update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
 
 out_unlock:
-	__wakeup_reset(tr);
+	__wakeup_reset(wakeup_trace);
 	__raw_spin_unlock(&wakeup_lock);
 	local_irq_restore(flags);
 out:
-	atomic_dec(&tr->data[cpu]->disabled);
-}
-
-static notrace void
-sched_switch_callback(void *probe_data, void *call_data,
-		      const char *format, va_list *args)
-{
-	struct task_struct *prev;
-	struct task_struct *next;
-	struct rq *__rq;
-
-	/* skip prev_pid %d next_pid %d prev_state %ld */
-	(void)va_arg(*args, int);
-	(void)va_arg(*args, int);
-	(void)va_arg(*args, long);
-	__rq = va_arg(*args, typeof(__rq));
-	prev = va_arg(*args, typeof(prev));
-	next = va_arg(*args, typeof(next));
-
-	tracing_record_cmdline(prev);
-
-	/*
-	 * If tracer_switch_func only points to the local
-	 * switch func, it still needs the ptr passed to it.
-	 */
-	wakeup_sched_switch(probe_data, __rq, prev, next);
+	atomic_dec(&wakeup_trace->data[cpu]->disabled);
 }
 
 static void __wakeup_reset(struct trace_array *tr)
@@ -240,19 +215,24 @@ static void wakeup_reset(struct trace_array *tr)
 }
 
 static void
-wakeup_check_start(struct trace_array *tr, struct task_struct *p,
-		   struct task_struct *curr)
+probe_wakeup(struct rq *rq, struct task_struct *p)
 {
 	int cpu = smp_processor_id();
 	unsigned long flags;
 	long disabled;
 
+	if (likely(!tracer_enabled))
+		return;
+
+	tracing_record_cmdline(p);
+	tracing_record_cmdline(current);
+
 	if (likely(!rt_task(p)) ||
 			p->prio >= wakeup_prio ||
-			p->prio >= curr->prio)
+			p->prio >= current->prio)
 		return;
 
-	disabled = atomic_inc_return(&tr->data[cpu]->disabled);
+	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
 	if (unlikely(disabled != 1))
 		goto out;
 
@@ -264,7 +244,7 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
 		goto out_locked;
 
 	/* reset the trace */
-	__wakeup_reset(tr);
+	__wakeup_reset(wakeup_trace);
 
 	wakeup_cpu = task_cpu(p);
 	wakeup_prio = p->prio;
@@ -274,74 +254,37 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
 
 	local_save_flags(flags);
 
-	tr->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
-	trace_function(tr, tr->data[wakeup_cpu],
+	wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
+	trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu],
 		       CALLER_ADDR1, CALLER_ADDR2, flags);
 
 out_locked:
 	__raw_spin_unlock(&wakeup_lock);
 out:
-	atomic_dec(&tr->data[cpu]->disabled);
+	atomic_dec(&wakeup_trace->data[cpu]->disabled);
 }
 
-static notrace void
-wake_up_callback(void *probe_data, void *call_data,
-		 const char *format, va_list *args)
-{
-	struct trace_array **ptr = probe_data;
-	struct trace_array *tr = *ptr;
-	struct task_struct *curr;
-	struct task_struct *task;
-	struct rq *__rq;
-
-	if (likely(!tracer_enabled))
-		return;
-
-	/* Skip pid %d state %ld */
-	(void)va_arg(*args, int);
-	(void)va_arg(*args, long);
-	/* now get the meat: "rq %p task %p rq->curr %p" */
-	__rq = va_arg(*args, typeof(__rq));
-	task = va_arg(*args, typeof(task));
-	curr = va_arg(*args, typeof(curr));
-
-	tracing_record_cmdline(task);
-	tracing_record_cmdline(curr);
-
-	wakeup_check_start(tr, task, curr);
-}
-
 static void start_wakeup_tracer(struct trace_array *tr)
 {
 	int ret;
 
-	ret = marker_probe_register("kernel_sched_wakeup",
-			"pid %d state %ld ## rq %p task %p rq->curr %p",
-			wake_up_callback,
-			&wakeup_trace);
+	ret = register_trace_sched_wakeup(probe_wakeup);
 	if (ret) {
-		pr_info("wakeup trace: Couldn't add marker"
+		pr_info("wakeup trace: Couldn't activate tracepoint"
 			" probe to kernel_sched_wakeup\n");
 		return;
 	}
 
-	ret = marker_probe_register("kernel_sched_wakeup_new",
-			"pid %d state %ld ## rq %p task %p rq->curr %p",
-			wake_up_callback,
-			&wakeup_trace);
+	ret = register_trace_sched_wakeup_new(probe_wakeup);
 	if (ret) {
-		pr_info("wakeup trace: Couldn't add marker"
+		pr_info("wakeup trace: Couldn't activate tracepoint"
 			" probe to kernel_sched_wakeup_new\n");
 		goto fail_deprobe;
 	}
 
-	ret = marker_probe_register("kernel_sched_schedule",
-		"prev_pid %d next_pid %d prev_state %ld "
-		"## rq %p prev %p next %p",
-		sched_switch_callback,
-		&wakeup_trace);
+	ret = register_trace_sched_switch(probe_wakeup_sched_switch);
 	if (ret) {
-		pr_info("sched trace: Couldn't add marker"
+		pr_info("sched trace: Couldn't activate tracepoint"
 			" probe to kernel_sched_schedule\n");
 		goto fail_deprobe_wake_new;
 	}
@@ -363,28 +306,18 @@ static void start_wakeup_tracer(struct trace_array *tr)
 
 	return;
 fail_deprobe_wake_new:
-	marker_probe_unregister("kernel_sched_wakeup_new",
-				wake_up_callback,
-				&wakeup_trace);
+	unregister_trace_sched_wakeup_new(probe_wakeup);
 fail_deprobe:
-	marker_probe_unregister("kernel_sched_wakeup",
-				wake_up_callback,
-				&wakeup_trace);
+	unregister_trace_sched_wakeup(probe_wakeup);
 }
 
 static void stop_wakeup_tracer(struct trace_array *tr)
 {
 	tracer_enabled = 0;
 	unregister_ftrace_function(&trace_ops);
-	marker_probe_unregister("kernel_sched_schedule",
-				sched_switch_callback,
-				&wakeup_trace);
-	marker_probe_unregister("kernel_sched_wakeup_new",
-				wake_up_callback,
-				&wakeup_trace);
-	marker_probe_unregister("kernel_sched_wakeup",
-				wake_up_callback,
-				&wakeup_trace);
+	unregister_trace_sched_switch(probe_wakeup_sched_switch);
+	unregister_trace_sched_wakeup_new(probe_wakeup);
+	unregister_trace_sched_wakeup(probe_wakeup);
 }
 
 static void wakeup_tracer_init(struct trace_array *tr)
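Throughout both files the probes guard against recursion and nested entry with the data->disabled counter: atomic_inc_return() reports the post-increment value, only the caller that observed the transition to 1 records a trace entry, and every caller decrements on the way out. A rough C11 analogue of that guard, using a single global counter where the kernel keeps one per CPU (a simplification for brevity):

#include <stdatomic.h>
#include <stdio.h>

static atomic_long disabled;		/* stands in for data->disabled */

static void probe_event(const char *what)
{
	/* atomic_fetch_add returns the old value; +1 mimics atomic_inc_return. */
	long d = atomic_fetch_add(&disabled, 1) + 1;

	if (d == 1)			/* only the outermost entry records */
		printf("trace: %s\n", what);

	atomic_fetch_sub(&disabled, 1);
}

int main(void)
{
	probe_event("sched_wakeup");		/* counter reaches 1: traced */

	atomic_fetch_add(&disabled, 1);		/* pretend we are inside a probe */
	probe_event("reentrant wakeup");	/* counter reaches 2: suppressed */
	atomic_fetch_sub(&disabled, 1);
	return 0;
}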