Diffstat (limited to 'kernel/trace/trace_irqsoff.c')
 -rw-r--r--  kernel/trace/trace_irqsoff.c | 163 ++++++++++-----------------
 1 file changed, 57 insertions(+), 106 deletions(-)
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 73a6b0601f2e..c77424be284d 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -80,21 +80,29 @@ static struct tracer_flags tracer_flags = {
  * skip the latency if the sequence has changed - some other section
  * did a maximum and could disturb our measurement with serial console
  * printouts, etc. Truly coinciding maximum latencies should be rare
- * and what happens together happens separately as well, so this doesnt
+ * and what happens together happens separately as well, so this doesn't
  * decrease the validity of the maximum found:
  */
 static __cacheline_aligned_in_smp unsigned long max_sequence;
 
 #ifdef CONFIG_FUNCTION_TRACER
 /*
- * irqsoff uses its own tracer function to keep the overhead down:
+ * Prologue for the preempt and irqs off function tracers.
+ *
+ * Returns 1 if it is OK to continue, and data->disabled is
+ *            incremented.
+ *         0 if the trace is to be ignored, and data->disabled
+ *            is kept the same.
+ *
+ * Note, this function is also used outside this ifdef but
+ *  inside the #ifdef of the function graph tracer below.
+ *  This is OK, since the function graph tracer is
+ *  dependent on the function tracer.
  */
-static void
-irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
+static int func_prolog_dec(struct trace_array *tr,
+			   struct trace_array_cpu **data,
+			   unsigned long *flags)
 {
-	struct trace_array *tr = irqsoff_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
 	long disabled;
 	int cpu;
 
@@ -106,18 +114,38 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
 	 */
 	cpu = raw_smp_processor_id();
 	if (likely(!per_cpu(tracing_cpu, cpu)))
-		return;
+		return 0;
 
-	local_save_flags(flags);
+	local_save_flags(*flags);
 	/* slight chance to get a false positive on tracing_cpu */
-	if (!irqs_disabled_flags(flags))
-		return;
+	if (!irqs_disabled_flags(*flags))
+		return 0;
 
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
+	*data = tr->data[cpu];
+	disabled = atomic_inc_return(&(*data)->disabled);
 
 	if (likely(disabled == 1))
-		trace_function(tr, ip, parent_ip, flags, preempt_count());
+		return 1;
+
+	atomic_dec(&(*data)->disabled);
+
+	return 0;
+}
+
+/*
+ * irqsoff uses its own tracer function to keep the overhead down:
+ */
+static void
+irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
+{
+	struct trace_array *tr = irqsoff_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+
+	if (!func_prolog_dec(tr, &data, &flags))
+		return;
+
+	trace_function(tr, ip, parent_ip, flags, preempt_count());
 
 	atomic_dec(&data->disabled);
 }
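
The whole refactor rests on the data->disabled counter, a per-cpu
recursion guard: only the outermost increment on a CPU is allowed to
record an event, and every nested caller immediately backs its
increment out. A minimal userspace sketch of the same idiom in C11
atomics (the names here are illustrative, not kernel API):

    #include <stdatomic.h>
    #include <stdio.h>

    /* The kernel keeps one counter per cpu; one is enough here. */
    static atomic_long disabled;

    /* Return 1, holding a reference, only for the outermost caller. */
    static int prolog_dec(void)
    {
            if (atomic_fetch_add(&disabled, 1) == 0)
                    return 1;               /* first in: OK to record */
            atomic_fetch_sub(&disabled, 1); /* nested entry: back out */
            return 0;
    }

    /* Pairs with every successful prolog_dec(). */
    static void epilog_inc(void)
    {
            atomic_fetch_sub(&disabled, 1);
    }

    int main(void)
    {
            if (prolog_dec()) {
                    if (!prolog_dec())      /* reentrant call is refused */
                            puts("nested call ignored");
                    epilog_inc();
            }
            return 0;
    }

func_prolog_dec() expresses the same test as
atomic_inc_return(&(*data)->disabled) == 1, and returning with the
count still held is what obliges every caller to finish with
atomic_dec(&data->disabled).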
@@ -125,6 +153,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
 static struct ftrace_ops trace_ops __read_mostly =
 {
 	.func = irqsoff_tracer_call,
+	.flags = FTRACE_OPS_FL_GLOBAL,
 };
 #endif /* CONFIG_FUNCTION_TRACER */
 
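
The new FTRACE_OPS_FL_GLOBAL flag marks these ops as subject to the
global function filter shared by the latency tracers. The ops only take
effect once handed to ftrace; a sketch of how this file arms its
callbacks (the wrapper name is hypothetical, error handling omitted):

    /* Hypothetical wrapper mirroring what start_irqsoff_tracer()
     * does: arm either the plain function callback carried by
     * trace_ops above, or the graph entry/return pair. */
    static int register_irqsoff_function(int graph)
    {
            if (graph)
                    return register_ftrace_graph(&irqsoff_graph_return,
                                                 &irqsoff_graph_entry);
            return register_ftrace_function(&trace_ops);
    }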
@@ -155,30 +184,16 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
 	struct trace_array *tr = irqsoff_trace;
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	long disabled;
 	int ret;
-	int cpu;
 	int pc;
 
-	cpu = raw_smp_processor_id();
-	if (likely(!per_cpu(tracing_cpu, cpu)))
-		return 0;
-
-	local_save_flags(flags);
-	/* slight chance to get a false positive on tracing_cpu */
-	if (!irqs_disabled_flags(flags))
+	if (!func_prolog_dec(tr, &data, &flags))
 		return 0;
 
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
-
-	if (likely(disabled == 1)) {
-		pc = preempt_count();
-		ret = __trace_graph_entry(tr, trace, flags, pc);
-	} else
-		ret = 0;
-
+	pc = preempt_count();
+	ret = __trace_graph_entry(tr, trace, flags, pc);
 	atomic_dec(&data->disabled);
+
 	return ret;
 }
 
@@ -187,27 +202,13 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
 	struct trace_array *tr = irqsoff_trace;
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	long disabled;
-	int cpu;
 	int pc;
 
-	cpu = raw_smp_processor_id();
-	if (likely(!per_cpu(tracing_cpu, cpu)))
-		return;
-
-	local_save_flags(flags);
-	/* slight chance to get a false positive on tracing_cpu */
-	if (!irqs_disabled_flags(flags))
+	if (!func_prolog_dec(tr, &data, &flags))
 		return;
 
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
-
-	if (likely(disabled == 1)) {
-		pc = preempt_count();
-		__trace_graph_return(tr, trace, flags, pc);
-	}
-
+	pc = preempt_count();
+	__trace_graph_return(tr, trace, flags, pc);
 	atomic_dec(&data->disabled);
 }
 
@@ -229,75 +230,33 @@ static void irqsoff_trace_close(struct trace_iterator *iter)
 
 static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
 {
-	u32 flags = GRAPH_TRACER_FLAGS;
-
-	if (trace_flags & TRACE_ITER_LATENCY_FMT)
-		flags |= TRACE_GRAPH_PRINT_DURATION;
-	else
-		flags |= TRACE_GRAPH_PRINT_ABS_TIME;
-
 	/*
 	 * In graph mode call the graph tracer output function,
 	 * otherwise go with the TRACE_FN event handler
 	 */
 	if (is_graph())
-		return print_graph_function_flags(iter, flags);
+		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);
 
 	return TRACE_TYPE_UNHANDLED;
 }
 
 static void irqsoff_print_header(struct seq_file *s)
 {
-	if (is_graph()) {
-		struct trace_iterator *iter = s->private;
-		u32 flags = GRAPH_TRACER_FLAGS;
-
-		if (trace_flags & TRACE_ITER_LATENCY_FMT) {
-			/* print nothing if the buffers are empty */
-			if (trace_empty(iter))
-				return;
-
-			print_trace_header(s, iter);
-			flags |= TRACE_GRAPH_PRINT_DURATION;
-		} else
-			flags |= TRACE_GRAPH_PRINT_ABS_TIME;
-
-		print_graph_headers_flags(s, flags);
-	} else
+	if (is_graph())
+		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
+	else
 		trace_default_header(s);
 }
 
 static void
-trace_graph_function(struct trace_array *tr,
-		 unsigned long ip, unsigned long flags, int pc)
-{
-	u64 time = trace_clock_local();
-	struct ftrace_graph_ent ent = {
-		.func  = ip,
-		.depth = 0,
-	};
-	struct ftrace_graph_ret ret = {
-		.func     = ip,
-		.depth    = 0,
-		.calltime = time,
-		.rettime  = time,
-	};
-
-	__trace_graph_entry(tr, &ent, flags, pc);
-	__trace_graph_return(tr, &ret, flags, pc);
-}
-
-static void
 __trace_function(struct trace_array *tr,
 		 unsigned long ip, unsigned long parent_ip,
 		 unsigned long flags, int pc)
 {
-	if (!is_graph())
+	if (is_graph())
+		trace_graph_function(tr, ip, parent_ip, flags, pc);
+	else
 		trace_function(tr, ip, parent_ip, flags, pc);
-	else {
-		trace_graph_function(tr, parent_ip, flags, pc);
-		trace_graph_function(tr, ip, flags, pc);
-	}
 }
 
 #else
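
Note that the trace_graph_function() helper deleted above is not dead:
the new __trace_function() calls a variant that also takes parent_ip,
so the helper has evidently been hoisted out of this file into the
shared graph-tracer code. Judging purely from the removed body, the
relocated version plausibly looks like this (a sketch, not verified
against trace_functions_graph.c):

    /* Emit an entry/return pair with identical timestamps so a plain
     * function call renders as a zero-duration leaf in the graph
     * output; parent_ip is accepted but not needed for that. */
    void
    trace_graph_function(struct trace_array *tr,
                         unsigned long ip, unsigned long parent_ip,
                         unsigned long flags, int pc)
    {
            u64 time = trace_clock_local();
            struct ftrace_graph_ent ent = {
                    .func  = ip,
                    .depth = 0,
            };
            struct ftrace_graph_ret ret = {
                    .func     = ip,
                    .depth    = 0,
                    .calltime = time,
                    .rettime  = time,
            };

            __trace_graph_entry(tr, &ent, flags, pc);
            __trace_graph_return(tr, &ret, flags, pc);
    }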
@@ -495,14 +454,6 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1)
  * Stubs:
  */
 
-void early_boot_irqs_off(void)
-{
-}
-
-void early_boot_irqs_on(void)
-{
-}
-
 void trace_softirqs_on(unsigned long ip)
 {
 }
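
The dropped early_boot_irqs_off()/on() stubs reflect a tree-wide
change: those early-boot tracing hooks were replaced by a plain global
flag that interested code reads directly. For context (declaration
from include/linux/kernel.h of this era, not part of this diff):

    /* True while early boot runs with IRQs force-disabled; cleared
     * when the boot CPU first enables interrupts. */
    extern bool early_boot_irqs_disabled;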