author		Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
commit		c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree		ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /kernel/trace/trace_sched_wakeup.c
parent		ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent		6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
	litmus/sched_cedf.c
Diffstat (limited to 'kernel/trace/trace_sched_wakeup.c')
-rw-r--r--	kernel/trace/trace_sched_wakeup.c | 257
1 file changed, 233 insertions(+), 24 deletions(-)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 4086eae6e81b..f029dd4fd2ca 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -31,57 +31,258 @@ static int wakeup_rt;
 static arch_spinlock_t wakeup_lock =
 	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
+static void wakeup_reset(struct trace_array *tr);
 static void __wakeup_reset(struct trace_array *tr);
+static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
+static void wakeup_graph_return(struct ftrace_graph_ret *trace);
 
 static int save_lat_flag;
 
+#define TRACE_DISPLAY_GRAPH	1
+
+static struct tracer_opt trace_opts[] = {
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	/* display latency trace as call graph */
+	{ TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
+#endif
+	{ } /* Empty entry */
+};
+
+static struct tracer_flags tracer_flags = {
+	.val  = 0,
+	.opts = trace_opts,
+};
+
+#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)
+
 #ifdef CONFIG_FUNCTION_TRACER
+
 /*
- * irqsoff uses its own tracer function to keep the overhead down:
+ * Prologue for the wakeup function tracers.
+ *
+ * Returns 1 if it is OK to continue, and preemption
+ * is disabled and data->disabled is incremented.
+ * 0 if the trace is to be ignored, and preemption
+ * is not disabled and data->disabled is
+ * kept the same.
+ *
+ * Note, this function is also used outside this ifdef but
+ * inside the #ifdef of the function graph tracer below.
+ * This is OK, since the function graph tracer is
+ * dependent on the function tracer.
  */
-static void
-wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
+static int
+func_prolog_preempt_disable(struct trace_array *tr,
+			    struct trace_array_cpu **data,
+			    int *pc)
 {
-	struct trace_array *tr = wakeup_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
 	long disabled;
 	int cpu;
-	int pc;
 
 	if (likely(!wakeup_task))
-		return;
+		return 0;
 
-	pc = preempt_count();
+	*pc = preempt_count();
 	preempt_disable_notrace();
 
 	cpu = raw_smp_processor_id();
 	if (cpu != wakeup_current_cpu)
 		goto out_enable;
 
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
+	*data = tr->data[cpu];
+	disabled = atomic_inc_return(&(*data)->disabled);
 	if (unlikely(disabled != 1))
 		goto out;
 
-	local_irq_save(flags);
+	return 1;
 
-	trace_function(tr, ip, parent_ip, flags, pc);
+out:
+	atomic_dec(&(*data)->disabled);
+
+out_enable:
+	preempt_enable_notrace();
+	return 0;
+}
 
+/*
+ * wakeup uses its own tracer function to keep the overhead down:
+ */
+static void
+wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
+{
+	struct trace_array *tr = wakeup_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	int pc;
+
+	if (!func_prolog_preempt_disable(tr, &data, &pc))
+		return;
+
+	local_irq_save(flags);
+	trace_function(tr, ip, parent_ip, flags, pc);
 	local_irq_restore(flags);
 
- out:
 	atomic_dec(&data->disabled);
- out_enable:
 	preempt_enable_notrace();
 }
 
 static struct ftrace_ops trace_ops __read_mostly =
 {
 	.func = wakeup_tracer_call,
+	.flags = FTRACE_OPS_FL_GLOBAL,
 };
 #endif /* CONFIG_FUNCTION_TRACER */
 
+static int start_func_tracer(int graph)
+{
+	int ret;
+
+	if (!graph)
+		ret = register_ftrace_function(&trace_ops);
+	else
+		ret = register_ftrace_graph(&wakeup_graph_return,
+					    &wakeup_graph_entry);
+
+	if (!ret && tracing_is_enabled())
+		tracer_enabled = 1;
+	else
+		tracer_enabled = 0;
+
+	return ret;
+}
+
+static void stop_func_tracer(int graph)
+{
+	tracer_enabled = 0;
+
+	if (!graph)
+		unregister_ftrace_function(&trace_ops);
+	else
+		unregister_ftrace_graph();
+}
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static int wakeup_set_flag(u32 old_flags, u32 bit, int set)
+{
+
+	if (!(bit & TRACE_DISPLAY_GRAPH))
+		return -EINVAL;
+
+	if (!(is_graph() ^ set))
+		return 0;
+
+	stop_func_tracer(!set);
+
+	wakeup_reset(wakeup_trace);
+	tracing_max_latency = 0;
+
+	return start_func_tracer(set);
+}
+
+static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
+{
+	struct trace_array *tr = wakeup_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	int pc, ret = 0;
+
+	if (!func_prolog_preempt_disable(tr, &data, &pc))
+		return 0;
+
+	local_save_flags(flags);
+	ret = __trace_graph_entry(tr, trace, flags, pc);
+	atomic_dec(&data->disabled);
+	preempt_enable_notrace();
+
+	return ret;
+}
+
+static void wakeup_graph_return(struct ftrace_graph_ret *trace)
+{
+	struct trace_array *tr = wakeup_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	int pc;
+
+	if (!func_prolog_preempt_disable(tr, &data, &pc))
+		return;
+
+	local_save_flags(flags);
+	__trace_graph_return(tr, trace, flags, pc);
+	atomic_dec(&data->disabled);
+
+	preempt_enable_notrace();
+	return;
+}
+
+static void wakeup_trace_open(struct trace_iterator *iter)
+{
+	if (is_graph())
+		graph_trace_open(iter);
+}
+
+static void wakeup_trace_close(struct trace_iterator *iter)
+{
+	if (iter->private)
+		graph_trace_close(iter);
+}
+
+#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC)
+
+static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
+{
+	/*
+	 * In graph mode call the graph tracer output function,
+	 * otherwise go with the TRACE_FN event handler
+	 */
+	if (is_graph())
+		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);
+
+	return TRACE_TYPE_UNHANDLED;
+}
+
+static void wakeup_print_header(struct seq_file *s)
+{
+	if (is_graph())
+		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
+	else
+		trace_default_header(s);
+}
+
+static void
+__trace_function(struct trace_array *tr,
+		 unsigned long ip, unsigned long parent_ip,
+		 unsigned long flags, int pc)
+{
+	if (is_graph())
+		trace_graph_function(tr, ip, parent_ip, flags, pc);
+	else
+		trace_function(tr, ip, parent_ip, flags, pc);
+}
+#else
+#define __trace_function trace_function
+
+static int wakeup_set_flag(u32 old_flags, u32 bit, int set)
+{
+	return -EINVAL;
+}
+
+static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
+{
+	return -1;
+}
+
+static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
+{
+	return TRACE_TYPE_UNHANDLED;
+}
+
+static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
+static void wakeup_print_header(struct seq_file *s) { }
+static void wakeup_trace_open(struct trace_iterator *iter) { }
+static void wakeup_trace_close(struct trace_iterator *iter) { }
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
 /*
  * Should this new latency be reported/recorded?
  */
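[Note on the hunk above] The display-graph option introduced here rides on the tracing core's generic tracer_flags mechanism: each tracer_opt entry becomes a user-visible toggle, and flipping it lands in the tracer's set_flag callback, which is exactly the shape of wakeup_set_flag() above. A minimal sketch of that pattern, with hypothetical my_* names (not part of this patch; relies on the same kernel/trace/trace.h definitions this file already uses):

#define MY_OPT_FOO	1			/* hypothetical option bit */

static struct tracer_opt my_opts[] = {
	{ TRACER_OPT(foo, MY_OPT_FOO) },	/* shows up as a trace option */
	{ }					/* empty entry terminates the list */
};

static struct tracer_flags my_flags = {
	.val	= 0,				/* "foo" starts disabled */
	.opts	= my_opts,
};

/* Called by the tracing core when user space toggles "foo". */
static int my_set_flag(u32 old_flags, u32 bit, int set)
{
	if (!(bit & MY_OPT_FOO))
		return -EINVAL;			/* not a bit this tracer owns */

	/* !(current ^ requested) is true when nothing would change */
	if (!((my_flags.val & MY_OPT_FOO) ^ set))
		return 0;

	/* A real mode switch restarts the machinery, as wakeup_set_flag()
	 * does via stop_func_tracer(!set) / start_func_tracer(set). */
	return 0;
}

The XOR test is why redundant writes of display-graph are cheap no-ops, while a genuine change first unregisters the currently active ftrace callbacks (stop_func_tracer(!set)) before registering the graph or plain function set.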
@@ -152,7 +353,7 @@ probe_wakeup_sched_switch(void *ignore,
 	/* The task we are waiting for is waking up */
 	data = wakeup_trace->data[wakeup_cpu];
 
-	trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
+	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
 	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
 
 	T0 = data->preempt_timestamp;
@@ -252,7 +453,7 @@ probe_wakeup(void *ignore, struct task_struct *p, int success)
 	 * is not called by an assembly function (where as schedule is)
 	 * it should be safe to use it here.
 	 */
-	trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
+	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
 
 out_locked:
 	arch_spin_unlock(&wakeup_lock);
@@ -303,12 +504,8 @@ static void start_wakeup_tracer(struct trace_array *tr)
 	 */
 	smp_wmb();
 
-	register_ftrace_function(&trace_ops);
-
-	if (tracing_is_enabled())
-		tracer_enabled = 1;
-	else
-		tracer_enabled = 0;
+	if (start_func_tracer(is_graph()))
+		printk(KERN_ERR "failed to start wakeup tracer\n");
 
 	return;
 fail_deprobe_wake_new:
@@ -320,7 +517,7 @@ fail_deprobe:
 static void stop_wakeup_tracer(struct trace_array *tr)
 {
 	tracer_enabled = 0;
-	unregister_ftrace_function(&trace_ops);
+	stop_func_tracer(is_graph());
 	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
 	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
 	unregister_trace_sched_wakeup(probe_wakeup, NULL);
@@ -379,9 +576,15 @@ static struct tracer wakeup_tracer __read_mostly =
 	.start		= wakeup_tracer_start,
 	.stop		= wakeup_tracer_stop,
 	.print_max	= 1,
+	.print_header	= wakeup_print_header,
+	.print_line	= wakeup_print_line,
+	.flags		= &tracer_flags,
+	.set_flag	= wakeup_set_flag,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest    = trace_selftest_startup_wakeup,
 #endif
+	.open		= wakeup_trace_open,
+	.close		= wakeup_trace_close,
 	.use_max_tr	= 1,
 };
 
@@ -394,9 +597,15 @@ static struct tracer wakeup_rt_tracer __read_mostly =
 	.stop		= wakeup_tracer_stop,
 	.wait_pipe	= poll_wait_pipe,
 	.print_max	= 1,
+	.print_header	= wakeup_print_header,
+	.print_line	= wakeup_print_line,
+	.flags		= &tracer_flags,
+	.set_flag	= wakeup_set_flag,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest    = trace_selftest_startup_wakeup,
 #endif
+	.open		= wakeup_trace_open,
+	.close		= wakeup_trace_close,
 	.use_max_tr	= 1,
 };
 
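[Note] The registration side is untouched by this merge: both tracer structures are still handed to the core at boot by this file's init code, which in this era of the tree reads roughly as follows (reconstructed for context, not part of the diff):

static __init int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	return 0;
}
device_initcall(init_wakeup_tracer);

Once one of these tracers is selected (e.g. echo wakeup_rt > /sys/kernel/debug/tracing/current_tracer, assuming debugfs is mounted at the usual location), the new display-graph toggle should appear alongside the generic trace options; enabling it switches the recorded latency trace from flat function entries to function-graph output, rendered by the wakeup_print_line()/wakeup_print_header() callbacks added in the first hunk.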