about · summary · refs · log · tree · commit · diff · stats
path: root/kernel/trace/trace_sched_wakeup.c
diff options
context:
space:
mode:
author	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-31 14:46:59 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-31 14:46:59 -0500
commit	495d714ad140e1732e66c45d0409054b24c1a0d6 (patch)
tree	373ec6619adea47d848d36f140b32def27164bbd /kernel/trace/trace_sched_wakeup.c
parent	f12e840c819bab42621685558a01d3f46ab9a226 (diff)
parent	3d739c1f6156c70eb0548aa288dcfbac9e0bd162 (diff)
Merge tag 'trace-v4.21' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt: - Rework of the kprobe/uprobe and synthetic events to consolidate all the dynamic event code. This will make changes in the future easier. - Partial rewrite of the function graph tracing infrastructure. This will allow for multiple users of hooking onto functions to get the callback (return) of the function. This is the ground work for having kprobes and function graph tracer using one code base. - Clean up of the histogram code that will facilitate adding more features to the histograms in the future. - Addition of str_has_prefix() and a few use cases. There currently is a similar function strstart() that is used in a few places, but only returns a bool and not a length. These instances will be removed in the future to use str_has_prefix() instead. - A few other various clean ups as well. * tag 'trace-v4.21' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (57 commits) tracing: Use the return of str_has_prefix() to remove open coded numbers tracing: Have the historgram use the result of str_has_prefix() for len of prefix tracing: Use str_has_prefix() instead of using fixed sizes tracing: Use str_has_prefix() helper for histogram code string.h: Add str_has_prefix() helper function tracing: Make function ‘ftrace_exports’ static tracing: Simplify printf'ing in seq_print_sym tracing: Avoid -Wformat-nonliteral warning tracing: Merge seq_print_sym_short() and seq_print_sym_offset() tracing: Add hist trigger comments for variable-related fields tracing: Remove hist trigger synth_var_refs tracing: Use hist trigger's var_ref array to destroy var_refs tracing: Remove open-coding of hist trigger var_ref management tracing: Use var_refs[] for hist trigger reference checking tracing: Change strlen to sizeof for hist trigger static strings tracing: Remove unnecessary hist trigger struct field tracing: Fix ftrace_graph_get_ret_stack() to use task and not current seq_buf: Use size_t for len in seq_buf_puts() seq_buf: 
Make seq_buf_puts() null-terminate the buffer arm64: Use ftrace_graph_get_ret_stack() instead of curr_ret_stack ...
Diffstat (limited to 'kernel/trace/trace_sched_wakeup.c')
-rw-r--r--	kernel/trace/trace_sched_wakeup.c | 270
1 file changed, 128 insertions(+), 142 deletions(-)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 7d04b9890755..4ea7e6845efb 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -35,26 +35,19 @@ static arch_spinlock_t wakeup_lock =
35 35
36static void wakeup_reset(struct trace_array *tr); 36static void wakeup_reset(struct trace_array *tr);
37static void __wakeup_reset(struct trace_array *tr); 37static void __wakeup_reset(struct trace_array *tr);
38static int start_func_tracer(struct trace_array *tr, int graph);
39static void stop_func_tracer(struct trace_array *tr, int graph);
38 40
39static int save_flags; 41static int save_flags;
40 42
41#ifdef CONFIG_FUNCTION_GRAPH_TRACER 43#ifdef CONFIG_FUNCTION_GRAPH_TRACER
42static int wakeup_display_graph(struct trace_array *tr, int set);
43# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH) 44# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
44#else 45#else
45static inline int wakeup_display_graph(struct trace_array *tr, int set)
46{
47 return 0;
48}
49# define is_graph(tr) false 46# define is_graph(tr) false
50#endif 47#endif
51 48
52
53#ifdef CONFIG_FUNCTION_TRACER 49#ifdef CONFIG_FUNCTION_TRACER
54 50
55static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
56static void wakeup_graph_return(struct ftrace_graph_ret *trace);
57
58static bool function_enabled; 51static bool function_enabled;
59 52
60/* 53/*
@@ -104,122 +97,8 @@ out_enable:
104 return 0; 97 return 0;
105} 98}
106 99
107/*
108 * wakeup uses its own tracer function to keep the overhead down:
109 */
110static void
111wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
112 struct ftrace_ops *op, struct pt_regs *pt_regs)
113{
114 struct trace_array *tr = wakeup_trace;
115 struct trace_array_cpu *data;
116 unsigned long flags;
117 int pc;
118
119 if (!func_prolog_preempt_disable(tr, &data, &pc))
120 return;
121
122 local_irq_save(flags);
123 trace_function(tr, ip, parent_ip, flags, pc);
124 local_irq_restore(flags);
125
126 atomic_dec(&data->disabled);
127 preempt_enable_notrace();
128}
129
130static int register_wakeup_function(struct trace_array *tr, int graph, int set)
131{
132 int ret;
133
134 /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
135 if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
136 return 0;
137
138 if (graph)
139 ret = register_ftrace_graph(&wakeup_graph_return,
140 &wakeup_graph_entry);
141 else
142 ret = register_ftrace_function(tr->ops);
143
144 if (!ret)
145 function_enabled = true;
146
147 return ret;
148}
149
150static void unregister_wakeup_function(struct trace_array *tr, int graph)
151{
152 if (!function_enabled)
153 return;
154
155 if (graph)
156 unregister_ftrace_graph();
157 else
158 unregister_ftrace_function(tr->ops);
159
160 function_enabled = false;
161}
162
163static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
164{
165 if (!(mask & TRACE_ITER_FUNCTION))
166 return 0;
167
168 if (set)
169 register_wakeup_function(tr, is_graph(tr), 1);
170 else
171 unregister_wakeup_function(tr, is_graph(tr));
172 return 1;
173}
174#else
175static int register_wakeup_function(struct trace_array *tr, int graph, int set)
176{
177 return 0;
178}
179static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
180static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
181{
182 return 0;
183}
184#endif /* CONFIG_FUNCTION_TRACER */
185
186static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
187{
188 struct tracer *tracer = tr->current_trace;
189
190 if (wakeup_function_set(tr, mask, set))
191 return 0;
192
193#ifdef CONFIG_FUNCTION_GRAPH_TRACER 100#ifdef CONFIG_FUNCTION_GRAPH_TRACER
194 if (mask & TRACE_ITER_DISPLAY_GRAPH)
195 return wakeup_display_graph(tr, set);
196#endif
197 101
198 return trace_keep_overwrite(tracer, mask, set);
199}
200
201static int start_func_tracer(struct trace_array *tr, int graph)
202{
203 int ret;
204
205 ret = register_wakeup_function(tr, graph, 0);
206
207 if (!ret && tracing_is_enabled())
208 tracer_enabled = 1;
209 else
210 tracer_enabled = 0;
211
212 return ret;
213}
214
215static void stop_func_tracer(struct trace_array *tr, int graph)
216{
217 tracer_enabled = 0;
218
219 unregister_wakeup_function(tr, graph);
220}
221
222#ifdef CONFIG_FUNCTION_GRAPH_TRACER
223static int wakeup_display_graph(struct trace_array *tr, int set) 102static int wakeup_display_graph(struct trace_array *tr, int set)
224{ 103{
225 if (!(is_graph(tr) ^ set)) 104 if (!(is_graph(tr) ^ set))
@@ -283,6 +162,11 @@ static void wakeup_graph_return(struct ftrace_graph_ret *trace)
283 return; 162 return;
284} 163}
285 164
165static struct fgraph_ops fgraph_wakeup_ops = {
166 .entryfunc = &wakeup_graph_entry,
167 .retfunc = &wakeup_graph_return,
168};
169
286static void wakeup_trace_open(struct trace_iterator *iter) 170static void wakeup_trace_open(struct trace_iterator *iter)
287{ 171{
288 if (is_graph(iter->tr)) 172 if (is_graph(iter->tr))
@@ -318,20 +202,87 @@ static void wakeup_print_header(struct seq_file *s)
318 else 202 else
319 trace_default_header(s); 203 trace_default_header(s);
320} 204}
205#endif /* else CONFIG_FUNCTION_GRAPH_TRACER */
321 206
207/*
208 * wakeup uses its own tracer function to keep the overhead down:
209 */
322static void 210static void
323__trace_function(struct trace_array *tr, 211wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
324 unsigned long ip, unsigned long parent_ip, 212 struct ftrace_ops *op, struct pt_regs *pt_regs)
325 unsigned long flags, int pc)
326{ 213{
327 if (is_graph(tr)) 214 struct trace_array *tr = wakeup_trace;
328 trace_graph_function(tr, ip, parent_ip, flags, pc); 215 struct trace_array_cpu *data;
216 unsigned long flags;
217 int pc;
218
219 if (!func_prolog_preempt_disable(tr, &data, &pc))
220 return;
221
222 local_irq_save(flags);
223 trace_function(tr, ip, parent_ip, flags, pc);
224 local_irq_restore(flags);
225
226 atomic_dec(&data->disabled);
227 preempt_enable_notrace();
228}
229
230static int register_wakeup_function(struct trace_array *tr, int graph, int set)
231{
232 int ret;
233
234 /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
235 if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
236 return 0;
237
238 if (graph)
239 ret = register_ftrace_graph(&fgraph_wakeup_ops);
329 else 240 else
330 trace_function(tr, ip, parent_ip, flags, pc); 241 ret = register_ftrace_function(tr->ops);
242
243 if (!ret)
244 function_enabled = true;
245
246 return ret;
331} 247}
332#else
333#define __trace_function trace_function
334 248
249static void unregister_wakeup_function(struct trace_array *tr, int graph)
250{
251 if (!function_enabled)
252 return;
253
254 if (graph)
255 unregister_ftrace_graph(&fgraph_wakeup_ops);
256 else
257 unregister_ftrace_function(tr->ops);
258
259 function_enabled = false;
260}
261
262static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
263{
264 if (!(mask & TRACE_ITER_FUNCTION))
265 return 0;
266
267 if (set)
268 register_wakeup_function(tr, is_graph(tr), 1);
269 else
270 unregister_wakeup_function(tr, is_graph(tr));
271 return 1;
272}
273#else /* CONFIG_FUNCTION_TRACER */
274static int register_wakeup_function(struct trace_array *tr, int graph, int set)
275{
276 return 0;
277}
278static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
279static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
280{
281 return 0;
282}
283#endif /* else CONFIG_FUNCTION_TRACER */
284
285#ifndef CONFIG_FUNCTION_GRAPH_TRACER
335static enum print_line_t wakeup_print_line(struct trace_iterator *iter) 286static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
336{ 287{
337 return TRACE_TYPE_UNHANDLED; 288 return TRACE_TYPE_UNHANDLED;
@@ -340,23 +291,58 @@ static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
340static void wakeup_trace_open(struct trace_iterator *iter) { } 291static void wakeup_trace_open(struct trace_iterator *iter) { }
341static void wakeup_trace_close(struct trace_iterator *iter) { } 292static void wakeup_trace_close(struct trace_iterator *iter) { }
342 293
343#ifdef CONFIG_FUNCTION_TRACER
344static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
345{
346 return -1;
347}
348static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
349static void wakeup_print_header(struct seq_file *s) 294static void wakeup_print_header(struct seq_file *s)
350{ 295{
351 trace_default_header(s); 296 trace_default_header(s);
352} 297}
353#else 298#endif /* !CONFIG_FUNCTION_GRAPH_TRACER */
354static void wakeup_print_header(struct seq_file *s) 299
300static void
301__trace_function(struct trace_array *tr,
302 unsigned long ip, unsigned long parent_ip,
303 unsigned long flags, int pc)
355{ 304{
356 trace_latency_header(s); 305 if (is_graph(tr))
306 trace_graph_function(tr, ip, parent_ip, flags, pc);
307 else
308 trace_function(tr, ip, parent_ip, flags, pc);
309}
310
311static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
312{
313 struct tracer *tracer = tr->current_trace;
314
315 if (wakeup_function_set(tr, mask, set))
316 return 0;
317
318#ifdef CONFIG_FUNCTION_GRAPH_TRACER
319 if (mask & TRACE_ITER_DISPLAY_GRAPH)
320 return wakeup_display_graph(tr, set);
321#endif
322
323 return trace_keep_overwrite(tracer, mask, set);
324}
325
326static int start_func_tracer(struct trace_array *tr, int graph)
327{
328 int ret;
329
330 ret = register_wakeup_function(tr, graph, 0);
331
332 if (!ret && tracing_is_enabled())
333 tracer_enabled = 1;
334 else
335 tracer_enabled = 0;
336
337 return ret;
338}
339
340static void stop_func_tracer(struct trace_array *tr, int graph)
341{
342 tracer_enabled = 0;
343
344 unregister_wakeup_function(tr, graph);
357} 345}
358#endif /* CONFIG_FUNCTION_TRACER */
359#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
360 346
361/* 347/*
362 * Should this new latency be reported/recorded? 348 * Should this new latency be reported/recorded?