author    Steven Rostedt <srostedt@redhat.com>  2010-10-05 16:38:49 -0400
committer Steven Rostedt <rostedt@goodmis.org> 2010-10-18 10:53:33 -0400
commit    542181d3769d001c59cd17573dd4381e87d215f2
tree      ffb13617cd0fee70cdd1a9eb869daca272b07454 /kernel/trace
parent    7495a5beaa22f190f4888aa8cbe4827c16575d0a
tracing: Use one prologue for the wakeup tracer function tracers
The wakeup tracer has three types of function tracers: the normal function tracer, function graph entry, and function graph return. Each of these uses the same complex dance to prevent recursion and to decide whether or not to trace the data (depending on the wakeup_task variable).

This patch moves the duplicated code into a single routine, to prevent future mistakes when this complex code has to be modified.

Cc: Jiri Olsa <jolsa@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/trace_sched_wakeup.c | 102
1 file changed, 50 insertions(+), 52 deletions(-)
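The shape of the refactoring, stated outside kernel context: a shared prologue reports through its return value whether the caller may proceed, and hands state back through out parameters, so each caller shrinks to one guarded call, its own body, and a mirrored epilogue. Below is a minimal userspace sketch of that pattern; the names (prolog, cpu_data, trace_event_a/b) are hypothetical stand-ins for illustration, not kernel API.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's tracer state. */
struct cpu_data { int disabled; };
static struct cpu_data fake_data;
static int tracing_active = 1;

/*
 * Shared prologue: returns 1 if the caller may trace, handing back
 * the per-CPU data and a pc value through out parameters; returns 0
 * if the event is to be ignored, leaving all state untouched.
 */
static int prolog(struct cpu_data **data, int *pc)
{
	if (!tracing_active)
		return 0;
	*pc = 0;			/* stands in for preempt_count() */
	*data = &fake_data;
	if (++(*data)->disabled != 1) {	/* nested entry: back out */
		(*data)->disabled--;
		return 0;
	}
	return 1;
}

/* Each caller is now one guarded call plus its own body. */
static void trace_event_a(void)
{
	struct cpu_data *data;
	int pc;

	if (!prolog(&data, &pc))
		return;
	printf("event A, pc=%d\n", pc);
	data->disabled--;		/* epilogue mirrors the prologue */
}

static void trace_event_b(void)
{
	struct cpu_data *data;
	int pc;

	if (!prolog(&data, &pc))
		return;
	printf("event B, pc=%d\n", pc);
	data->disabled--;
}

int main(void)
{
	trace_event_a();
	trace_event_b();
	return 0;
}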
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 033510dbb322..31689d2df7f3 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -56,43 +56,73 @@ static struct tracer_flags tracer_flags = {
 #define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)
 
 #ifdef CONFIG_FUNCTION_TRACER
+
 /*
- * wakeup uses its own tracer function to keep the overhead down:
+ * Prologue for the wakeup function tracers.
+ *
+ * Returns 1 if it is OK to continue, and preemption
+ * is disabled and data->disabled is incremented.
+ * 0 if the trace is to be ignored, and preemption
+ * is not disabled and data->disabled is
+ * kept the same.
+ *
+ * Note, this function is also used outside this ifdef but
+ * inside the #ifdef of the function graph tracer below.
+ * This is OK, since the function graph tracer is
+ * dependent on the function tracer.
  */
-static void
-wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
+static int
+func_prolog_preempt_disable(struct trace_array *tr,
+			    struct trace_array_cpu **data,
+			    int *pc)
 {
-	struct trace_array *tr = wakeup_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
 	long disabled;
 	int cpu;
-	int pc;
 
 	if (likely(!wakeup_task))
-		return;
+		return 0;
 
-	pc = preempt_count();
+	*pc = preempt_count();
 	preempt_disable_notrace();
 
 	cpu = raw_smp_processor_id();
 	if (cpu != wakeup_current_cpu)
 		goto out_enable;
 
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
+	*data = tr->data[cpu];
+	disabled = atomic_inc_return(&(*data)->disabled);
 	if (unlikely(disabled != 1))
 		goto out;
 
-	local_irq_save(flags);
+	return 1;
 
-	trace_function(tr, ip, parent_ip, flags, pc);
+out:
+	atomic_dec(&(*data)->disabled);
+
+out_enable:
+	preempt_enable_notrace();
+	return 0;
+}
+
+/*
+ * wakeup uses its own tracer function to keep the overhead down:
+ */
+static void
+wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
+{
+	struct trace_array *tr = wakeup_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	int pc;
+
+	if (!func_prolog_preempt_disable(tr, &data, &pc))
+		return;
 
+	local_irq_save(flags);
+	trace_function(tr, ip, parent_ip, flags, pc);
 	local_irq_restore(flags);
 
- out:
 	atomic_dec(&data->disabled);
- out_enable:
 	preempt_enable_notrace();
 }
 
@@ -154,32 +184,16 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
 	struct trace_array *tr = wakeup_trace;
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	long disabled;
-	int cpu, pc, ret = 0;
+	int pc, ret = 0;
 
-	if (likely(!wakeup_task))
+	if (!func_prolog_preempt_disable(tr, &data, &pc))
 		return 0;
 
-	pc = preempt_count();
-	preempt_disable_notrace();
-
-	cpu = raw_smp_processor_id();
-	if (cpu != wakeup_current_cpu)
-		goto out_enable;
-
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
-	if (unlikely(disabled != 1))
-		goto out;
-
 	local_save_flags(flags);
 	ret = __trace_graph_entry(tr, trace, flags, pc);
-
-out:
 	atomic_dec(&data->disabled);
-
-out_enable:
 	preempt_enable_notrace();
+
 	return ret;
 }
 
@@ -188,31 +202,15 @@ static void wakeup_graph_return(struct ftrace_graph_ret *trace)
 	struct trace_array *tr = wakeup_trace;
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	long disabled;
-	int cpu, pc;
+	int pc;
 
-	if (likely(!wakeup_task))
+	if (!func_prolog_preempt_disable(tr, &data, &pc))
 		return;
 
-	pc = preempt_count();
-	preempt_disable_notrace();
-
-	cpu = raw_smp_processor_id();
-	if (cpu != wakeup_current_cpu)
-		goto out_enable;
-
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
-	if (unlikely(disabled != 1))
-		goto out;
-
 	local_save_flags(flags);
 	__trace_graph_return(tr, trace, flags, pc);
-
-out:
 	atomic_dec(&data->disabled);
 
-out_enable:
 	preempt_enable_notrace();
 	return;
 }
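A note on the disabled != 1 test the helper centralizes: atomic_inc_return() yields the value after the increment, so only the entry that takes the counter from 0 to 1 proceeds; any nested invocation sees a larger value and backs out, which is what breaks tracer recursion. A hedged userspace sketch of just that guard, using C11 atomics in place of the kernel's atomic_t; since atomic_fetch_add returns the value before the add, the equivalent test here is against 0.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int disabled;	/* stands in for data->disabled */

static void nested(void);

static void traced(void)
{
	/* A prior value of 0 means we are the outermost entry. */
	if (atomic_fetch_add(&disabled, 1) != 0) {
		atomic_fetch_sub(&disabled, 1);
		printf("recursive entry ignored\n");
		return;
	}
	printf("tracing\n");
	nested();			/* re-enters the tracer */
	atomic_fetch_sub(&disabled, 1);
}

static void nested(void)
{
	traced();			/* guard trips: prior value is 1 */
}

int main(void)
{
	traced();
	return 0;
}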