 kernel/Makefile       |    1 +
 kernel/trace/Kconfig  |    5 +
 kernel/trace/Makefile |    2 +
 kernel/trace/trace.c  | 1547 ++++++++++++++++++++++++++++++++++++++++++++++++
 kernel/trace/trace.h  |  184 ++++++
 5 files changed, 1739 insertions(+), 0 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index fa05f6d8bdbf..7e344e7b0cb3 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -70,6 +70,7 @@ obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
 obj-$(CONFIG_MARKERS) += marker.o
 obj-$(CONFIG_LATENCYTOP) += latencytop.o
 obj-$(CONFIG_FTRACE) += trace/
+obj-$(CONFIG_TRACING) += trace/
 
 ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y)
 # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 8185c91417bc..ce70677afbf9 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -3,3 +3,8 @@
 #
 config HAVE_FTRACE
 	bool
+
+config TRACING
+	bool
+	select DEBUG_FS
+
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index bf4fd215a6a9..7af403175255 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -1,3 +1,5 @@
 obj-$(CONFIG_FTRACE) += libftrace.o
 
+obj-$(CONFIG_TRACING) += trace.o
+
 libftrace-y := ftrace.o
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
new file mode 100644
index 000000000000..1b8eca7650d4
--- /dev/null
+++ b/kernel/trace/trace.c
@@ -0,0 +1,1547 @@
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/utsrelease.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/fs.h>

#include "trace.h"

unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly tracing_thresh;

static long notrace
ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

static atomic_t tracer_counter;
static struct trace_array global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

static struct trace_array max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_data);

static int tracer_enabled;
static unsigned long trace_nr_entries = 4096UL;

static struct tracer *trace_types __read_mostly;
static struct tracer *current_trace __read_mostly;
static int max_tracer_type_len;

static DEFINE_MUTEX(trace_types_lock);

static int __init set_nr_entries(char *str)
{
	if (!str)
		return 0;
	trace_nr_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("trace_entries=", set_nr_entries);
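
/*
 * Usage note (illustrative, not part of the patch): booting with e.g.
 * "trace_entries=65536" on the kernel command line overrides the
 * default of 4096 entries per CPU before the buffers are allocated
 * by tracer_alloc_buffers() below.
 */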

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,

	__TRACE_LAST_TYPE
};

enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF	= 0x01,
	TRACE_FLAG_NEED_RESCHED	= 0x02,
	TRACE_FLAG_HARDIRQ	= 0x04,
	TRACE_FLAG_SOFTIRQ	= 0x08,
};

enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT	= 0x01,
	TRACE_ITER_SYM_OFFSET	= 0x02,
	TRACE_ITER_SYM_ADDR	= 0x04,
	TRACE_ITER_VERBOSE	= 0x08,
};

#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

/* These must match the bit positions above */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	NULL
};

static unsigned trace_flags;


/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /debugfs/tracing/latency_trace)
 */
static void notrace
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];

	max_tr.cpu = cpu;
	max_tr.time_start = data->preempt_timestamp;

	data = max_tr.data[cpu];
	data->saved_latency = tracing_max_latency;

	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
	data->pid = tsk->pid;
	data->uid = tsk->uid;
	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	data->policy = tsk->policy;
	data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(current);
}

notrace void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data;
	void *save_trace;
	int i;

	/* clear out all the previous traces */
	for_each_possible_cpu(i) {
		data = tr->data[i];
		save_trace = max_tr.data[i]->trace;
		memcpy(max_tr.data[i], data, sizeof(*data));
		data->trace = save_trace;
	}

	__update_max_tr(tr, tsk, cpu);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 */
notrace void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];
	void *save_trace;
	int i;

	for_each_possible_cpu(i)
		tracing_reset(max_tr.data[i]);

	save_trace = max_tr.data[cpu]->trace;
	memcpy(max_tr.data[cpu], data, sizeof(*data));
	data->trace = save_trace;

	__update_max_tr(tr, tsk, cpu);
}

int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int len;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Trace %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	type->next = trace_types;
	trace_types = type;
	len = strlen(type->name);
	if (len > max_tracer_type_len)
		max_tracer_type_len = len;
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

void unregister_tracer(struct tracer *type)
{
	struct tracer **t;
	int len;

	mutex_lock(&trace_types_lock);
	for (t = &trace_types; *t; t = &(*t)->next) {
		if (*t == type)
			goto found;
	}
	pr_info("Trace %s not registered\n", type->name);
	goto out;

 found:
	*t = (*t)->next;
	if (strlen(type->name) != max_tracer_type_len)
		goto out;

	max_tracer_type_len = 0;
	for (t = &trace_types; *t; t = &(*t)->next) {
		len = strlen((*t)->name);
		if (len > max_tracer_type_len)
			max_tracer_type_len = len;
	}
 out:
	mutex_unlock(&trace_types_lock);
}
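
/*
 * Illustrative sketch (not part of this patch): a tracer plugin hooks
 * into the registry above by filling in a struct tracer (see trace.h)
 * and calling register_tracer() from an initcall. The "example" name
 * and init callback below are hypothetical; the function and
 * sched-switch tracers built on top of this core follow the same
 * pattern.
 *
 *	static void example_init(struct trace_array *tr)
 *	{
 *		int cpu;
 *
 *		tr->time_start = now(tr->cpu);
 *		for_each_possible_cpu(cpu)
 *			tracing_reset(tr->data[cpu]);
 *	}
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",
 *		.init	= example_init,
 *	};
 *
 *	static __init int init_example_tracer(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 *	device_initcall(init_example_tracer);
 */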

void notrace tracing_reset(struct trace_array_cpu *data)
{
	data->trace_idx = 0;
	atomic_set(&data->underrun, 0);
}

#ifdef CONFIG_FTRACE
static void notrace
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (unlikely(!tracer_enabled))
		return;

	raw_local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		ftrace(tr, data, ip, parent_ip, flags);

	atomic_dec(&data->disabled);
	raw_local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
};
#endif

notrace void tracing_start_function_trace(void)
{
	register_ftrace_function(&trace_ops);
}

notrace void tracing_stop_function_trace(void)
{
	unregister_ftrace_function(&trace_ops);
}

#define SAVED_CMDLINES 128
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static DEFINE_SPINLOCK(trace_cmdline_lock);
atomic_t trace_record_cmdline_disabled;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

notrace void trace_stop_cmdline_recording(void);

static void notrace trace_save_cmdline(struct task_struct *tsk)
{
	unsigned map;
	unsigned idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx >= SAVED_CMDLINES) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		map = map_cmdline_to_pid[idx];
		if (map <= PID_MAX_DEFAULT)
			map_pid_to_cmdline[map] = (unsigned)-1;

		map_pid_to_cmdline[tsk->pid] = idx;
		/* record which pid owns this slot, so the eviction
		   above can clear it later */
		map_cmdline_to_pid[idx] = tsk->pid;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	spin_unlock(&trace_cmdline_lock);
}

static notrace char *trace_find_cmdline(int pid)
{
	char *cmdline = "<...>";
	unsigned map;

	if (!pid)
		return "<idle>";

	if (pid > PID_MAX_DEFAULT)
		goto out;

	map = map_pid_to_cmdline[pid];
	if (map >= SAVED_CMDLINES)
		goto out;

	cmdline = saved_cmdlines[map];

 out:
	return cmdline;
}
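
/*
 * Worked example of the pid => comm mapping above: when pid 1234 is
 * first saved it gets the slot after cmdline_idx, say slot 5, so
 * map_pid_to_cmdline[1234] == 5 and map_cmdline_to_pid[5] == 1234.
 * Since there are only 128 slots, saving a new pid may recycle a slot
 * and invalidate the evicted pid's entry; that is why
 * trace_find_cmdline() can return "<...>" for a pid whose comm has
 * aged out.
 */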

notrace void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled))
		return;

	trace_save_cmdline(tsk);
}

static inline notrace struct trace_entry *
tracing_get_trace_entry(struct trace_array *tr,
			struct trace_array_cpu *data)
{
	unsigned long idx, idx_next;
	struct trace_entry *entry;

	idx = data->trace_idx;
	idx_next = idx + 1;

	if (unlikely(idx_next >= tr->entries)) {
		atomic_inc(&data->underrun);
		idx_next = 0;
	}

	data->trace_idx = idx_next;

	if (unlikely(idx_next != 0 && atomic_read(&data->underrun)))
		atomic_inc(&data->underrun);

	entry = data->trace + idx * TRACE_ENTRY_SIZE;

	return entry;
}
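
/*
 * Note on the underrun counter above: the first increment (taken when
 * trace_idx wraps) records that the buffer has cycled; after that,
 * every further write bumps it again. The net effect is that once the
 * buffer is full, (underrun - 1) % tr->entries is the slot holding
 * the oldest surviving entry, which is exactly how trace_entry_idx()
 * below rebases iterator indexes.
 */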

static inline notrace void
tracing_generic_entry_update(struct trace_entry *entry,
			     unsigned long flags)
{
	struct task_struct *tsk = current;
	unsigned long pc;

	pc = preempt_count();

	entry->idx = atomic_inc_return(&tracer_counter);
	entry->preempt_count = pc & 0xff;
	entry->pid = tsk->pid;
	entry->t = now(raw_smp_processor_id());
	entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}

notrace void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip,
       unsigned long flags)
{
	struct trace_entry *entry;

	entry = tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, flags);
	entry->type = TRACE_FN;
	entry->fn.ip = ip;
	entry->fn.parent_ip = parent_ip;
}

notrace void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct trace_array_cpu *data,
			   struct task_struct *prev, struct task_struct *next,
			   unsigned long flags)
{
	struct trace_entry *entry;

	entry = tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, flags);
	entry->type = TRACE_CTX;
	entry->ctx.prev_pid = prev->pid;
	entry->ctx.prev_prio = prev->prio;
	entry->ctx.prev_state = prev->state;
	entry->ctx.next_pid = next->pid;
	entry->ctx.next_prio = next->prio;
}
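
/*
 * Illustrative sketch (not part of this patch): a context-switch
 * tracer would call the helper above from a scheduler hook, using the
 * same disabled-counter/irq discipline as function_trace_call(). The
 * hook name is hypothetical.
 *
 *	static void notrace
 *	example_ctx_switch_hook(struct task_struct *prev,
 *				struct task_struct *next)
 *	{
 *		struct trace_array *tr = &global_trace;
 *		struct trace_array_cpu *data;
 *		unsigned long flags;
 *		int cpu;
 *
 *		if (!tracer_enabled)
 *			return;
 *
 *		raw_local_irq_save(flags);
 *		cpu = raw_smp_processor_id();
 *		data = tr->data[cpu];
 *
 *		if (atomic_inc_return(&data->disabled) == 1)
 *			tracing_sched_switch_trace(tr, data, prev,
 *						   next, flags);
 *
 *		atomic_dec(&data->disabled);
 *		raw_local_irq_restore(flags);
 *	}
 */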

enum trace_file_type {
	TRACE_FILE_LAT_FMT = 1,
};

static struct trace_entry *
trace_entry_idx(struct trace_array *tr, unsigned long idx, int cpu)
{
	struct trace_entry *array = tr->data[cpu]->trace;
	unsigned long underrun;

	if (idx >= tr->entries)
		return NULL;

	underrun = atomic_read(&tr->data[cpu]->underrun);
	if (underrun)
		idx = ((underrun - 1) + idx) % tr->entries;
	else if (idx >= tr->data[cpu]->trace_idx)
		return NULL;

	return &array[idx];
}
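
/*
 * Worked example: with tr->entries == 4096, suppose a CPU's buffer
 * has wrapped once and taken 100 more entries, so underrun == 101 and
 * trace_idx == 100. Iterator index 0 then maps to physical slot
 * (101 - 1 + 0) % 4096 == 100, the oldest entry still present, and
 * index 4095 maps to slot 99, the newest.
 */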

static notrace struct trace_entry *
find_next_entry(struct trace_iterator *iter, int *ent_cpu)
{
	struct trace_array *tr = iter->tr;
	struct trace_entry *ent, *next = NULL;
	int next_cpu = -1;
	int cpu;

	for_each_possible_cpu(cpu) {
		if (!tr->data[cpu]->trace)
			continue;
		ent = trace_entry_idx(tr, iter->next_idx[cpu], cpu);
		if (ent &&
		    (!next || (long)(next->idx - ent->idx) > 0)) {
			next = ent;
			next_cpu = cpu;
		}
	}

	if (ent_cpu)
		*ent_cpu = next_cpu;

	return next;
}

static void *find_next_entry_inc(struct trace_iterator *iter)
{
	struct trace_entry *next;
	int next_cpu = -1;

	next = find_next_entry(iter, &next_cpu);

	if (next) {
		iter->next_idx[next_cpu]++;
		iter->idx++;
	}
	iter->ent = next;
	iter->cpu = next_cpu;

	return next ? iter : NULL;
}

static void notrace *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	void *ent;
	void *last_ent = iter->ent;
	int i = (int)*pos;

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = find_next_entry_inc(iter);

	iter->pos = *pos;

	if (last_ent && !ent)
		seq_puts(m, "\n\nvim:ft=help\n");

	return ent;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = 0;
	int i;

	mutex_lock(&trace_types_lock);

	if (!current_trace || current_trace != iter->trace)
		return NULL;

	atomic_inc(&trace_record_cmdline_disabled);

	/* let the tracer grab locks here if needed */
	if (current_trace->start)
		current_trace->start(iter);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		for (i = 0; i < NR_CPUS; i++)
			iter->next_idx[i] = 0;

		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		l = *pos;
		p = s_next(m, p, &l);
	}

	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

	atomic_dec(&trace_record_cmdline_disabled);

	/* let the tracer release locks here if needed */
	if (current_trace && current_trace == iter->trace && iter->trace->stop)
		iter->trace->stop(iter);

	mutex_unlock(&trace_types_lock);
}

static void
seq_print_sym_short(struct seq_file *m, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	seq_printf(m, fmt, str);
#endif
}

static void
seq_print_sym_offset(struct seq_file *m, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	sprint_symbol(str, address);
	seq_printf(m, fmt, str);
#endif
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

static void notrace
seq_print_ip_sym(struct seq_file *m, unsigned long ip, unsigned long sym_flags)
{
	if (!ip) {
		seq_printf(m, "0");
		return;
	}

	if (sym_flags & TRACE_ITER_SYM_OFFSET)
		seq_print_sym_offset(m, "%s", ip);
	else
		seq_print_sym_short(m, "%s", ip);

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		seq_printf(m, " <" IP_FMT ">", ip);
}
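
/*
 * Example of what the flag combinations above produce for the same
 * address (output approximate; assumes CONFIG_KALLSYMS=y):
 *
 *	default:		schedule
 *	sym-offset:		schedule+0x6e/0x5fa
 *	plus sym-addr:		schedule+0x6e/0x5fa <ffffffff80486b1e>
 */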

static void notrace print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                _------=> CPU#            \n");
	seq_puts(m, "#               / _-----=> irqs-off        \n");
	seq_puts(m, "#              | / _----=> need-resched    \n");
	seq_puts(m, "#              || / _---=> hardirq/softirq \n");
	seq_puts(m, "#              ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#              |||| /                      \n");
	seq_puts(m, "#              |||||     delay             \n");
	seq_puts(m, "#  cmd     pid ||||| time  |   caller      \n");
	seq_puts(m, "#     \\   /    |||||   \\   |   /           \n");
}

static void notrace print_func_help_header(struct seq_file *m)
{
	seq_puts(m, "#           TASK-PID   CPU#    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |      |          |         |\n");
}


static void notrace
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_array *tr = iter->tr;
	struct trace_array_cpu *data = tr->data[tr->cpu];
	struct tracer *type = current_trace;
	unsigned long underruns = 0;
	unsigned long underrun;
	unsigned long entries = 0;
	int cpu;
	const char *name = "preemption";

	if (type)
		name = type->name;

	for_each_possible_cpu(cpu) {
		if (tr->data[cpu]->trace) {
			underrun = atomic_read(&tr->data[cpu]->underrun);
			if (underrun) {
				underruns += underrun;
				entries += tr->entries;
			} else
				entries += tr->data[cpu]->trace_idx;
		}
	}

	seq_printf(m, "%s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "-----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   data->saved_latency,
		   entries,
		   (entries + underruns),
		   tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT_DESKTOP)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "    -----------------\n");
	seq_printf(m, "    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid, data->uid, data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, " => started at: ");
		seq_print_ip_sym(m, data->critical_start, sym_flags);
		seq_puts(m, "\n => ended at:   ");
		seq_print_ip_sym(m, data->critical_end, sym_flags);
		seq_puts(m, "\n");
	}

	seq_puts(m, "\n");
}

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

static void notrace
lat_print_generic(struct seq_file *m, struct trace_entry *entry, int cpu)
{
	int hardirq, softirq;
	char *comm;

	comm = trace_find_cmdline(entry->pid);

	seq_printf(m, "%8.8s-%-5d ", comm, entry->pid);
	seq_printf(m, "%d", cpu);
	seq_printf(m, "%c%c",
		   (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
		   ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
	if (hardirq && softirq)
		seq_putc(m, 'H');
	else {
		if (hardirq)
			seq_putc(m, 'h');
		else {
			if (softirq)
				seq_putc(m, 's');
			else
				seq_putc(m, '.');
		}
	}

	if (entry->preempt_count)
		seq_printf(m, "%x", entry->preempt_count);
	else
		seq_puts(m, ".");
}

unsigned long preempt_mark_thresh = 100;

static void notrace
lat_print_timestamp(struct seq_file *m, unsigned long long abs_usecs,
		    unsigned long rel_usecs)
{
	seq_printf(m, " %4lldus", abs_usecs);
	if (rel_usecs > preempt_mark_thresh)
		seq_puts(m, "!: ");
	else if (rel_usecs > 1)
		seq_puts(m, "+: ");
	else
		seq_puts(m, " : ");
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static void notrace
print_lat_fmt(struct seq_file *m, struct trace_iterator *iter,
	      unsigned int trace_idx, int cpu)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *next_entry = find_next_entry(iter, NULL);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
	struct trace_entry *entry = iter->ent;
	unsigned long abs_usecs;
	unsigned long rel_usecs;
	char *comm;
	int S;

	if (!next_entry)
		next_entry = entry;
	rel_usecs = ns2usecs(next_entry->t - entry->t);
	abs_usecs = ns2usecs(entry->t - iter->tr->time_start);

	if (verbose) {
		comm = trace_find_cmdline(entry->pid);
		seq_printf(m, "%16s %5d %d %d %08x %08x [%08lx]"
			   " %ld.%03ldms (+%ld.%03ldms): ",
			   comm,
			   entry->pid, cpu, entry->flags,
			   entry->preempt_count, trace_idx,
			   ns2usecs(entry->t),
			   abs_usecs/1000,
			   abs_usecs % 1000, rel_usecs/1000, rel_usecs % 1000);
	} else {
		lat_print_generic(m, entry, cpu);
		lat_print_timestamp(m, abs_usecs, rel_usecs);
	}
	switch (entry->type) {
	case TRACE_FN:
		seq_print_ip_sym(m, entry->fn.ip, sym_flags);
		seq_puts(m, " (");
		seq_print_ip_sym(m, entry->fn.parent_ip, sym_flags);
		seq_puts(m, ")\n");
		break;
	case TRACE_CTX:
		S = entry->ctx.prev_state < sizeof(state_to_char) ?
			state_to_char[entry->ctx.prev_state] : 'X';
		comm = trace_find_cmdline(entry->ctx.next_pid);
		seq_printf(m, " %d:%d:%c --> %d:%d %s\n",
			   entry->ctx.prev_pid,
			   entry->ctx.prev_prio,
			   S,
			   entry->ctx.next_pid,
			   entry->ctx.next_prio,
			   comm);
		break;
	}
}

static void notrace
print_trace_fmt(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry = iter->ent;
	unsigned long usec_rem;
	unsigned long long t;
	unsigned long secs;
	char *comm;
	int S;

	comm = trace_find_cmdline(iter->ent->pid);

	t = ns2usecs(entry->t);
	usec_rem = do_div(t, 1000000ULL);
	secs = (unsigned long)t;

	seq_printf(m, "%16s-%-5d ", comm, entry->pid);
	seq_printf(m, "[%02d] ", iter->cpu);
	seq_printf(m, "%5lu.%06lu: ", secs, usec_rem);

	switch (entry->type) {
	case TRACE_FN:
		seq_print_ip_sym(m, entry->fn.ip, sym_flags);
		if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
		    entry->fn.parent_ip) {
			seq_printf(m, " <-");
			seq_print_ip_sym(m, entry->fn.parent_ip, sym_flags);
		}
		break;
	case TRACE_CTX:
		S = entry->ctx.prev_state < sizeof(state_to_char) ?
			state_to_char[entry->ctx.prev_state] : 'X';
		seq_printf(m, " %d:%d:%c ==> %d:%d\n",
			   entry->ctx.prev_pid,
			   entry->ctx.prev_prio,
			   S,
			   entry->ctx.next_pid,
			   entry->ctx.next_prio);
		break;
	}
	seq_printf(m, "\n");
}

static int trace_empty(struct trace_iterator *iter)
{
	struct trace_array_cpu *data;
	int cpu;

	for_each_possible_cpu(cpu) {
		data = iter->tr->data[cpu];

		if (data->trace &&
		    (data->trace_idx ||
		     atomic_read(&data->underrun)))
			return 0;
	}
	return 1;
}

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
		}
		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			/* print nothing if the buffers are empty */
			if (trace_empty(iter))
				return 0;
			print_trace_header(m, iter);
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_lat_help_header(m);
		} else {
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_func_help_header(m);
		}
	} else {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
			print_lat_fmt(m, iter, iter->idx, iter->cpu);
		else
			print_trace_fmt(m, iter);
	}

	return 0;
}

static struct seq_operations tracer_seq_ops = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static struct trace_iterator notrace *
__tracing_open(struct inode *inode, struct file *file, int *ret)
{
	struct trace_iterator *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		*ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&trace_types_lock);
	if (current_trace && current_trace->print_max)
		iter->tr = &max_tr;
	else
		iter->tr = inode->i_private;
	iter->trace = current_trace;
	iter->pos = -1;

	/* TODO stop tracer */
	*ret = seq_open(file, &tracer_seq_ops);
	if (!*ret) {
		struct seq_file *m = file->private_data;
		m->private = iter;

		/* stop the trace while dumping */
		if (iter->tr->ctrl)
			tracer_enabled = 0;

		if (iter->trace && iter->trace->open)
			iter->trace->open(iter);
	} else {
		kfree(iter);
		iter = NULL;
	}
	mutex_unlock(&trace_types_lock);

 out:
	return iter;
}

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}

int tracing_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct trace_iterator *iter = m->private;

	mutex_lock(&trace_types_lock);
	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	/* reenable tracing if it was previously enabled */
	if (iter->tr->ctrl)
		tracer_enabled = 1;
	mutex_unlock(&trace_types_lock);

	seq_release(inode, file);
	kfree(iter);
	return 0;
}

static int tracing_open(struct inode *inode, struct file *file)
{
	int ret;

	__tracing_open(inode, file, &ret);

	return ret;
}

static int tracing_lt_open(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter;
	int ret;

	iter = __tracing_open(inode, file, &ret);

	if (!ret)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	return ret;
}


static void notrace *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct tracer *t = m->private;

	(*pos)++;

	if (t)
		t = t->next;

	m->private = t;

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct tracer *t = m->private;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_printf(m, "%s", t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static struct seq_operations show_traces_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = seq_open(file, &show_traces_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = trace_types;
	}

	return ret;
}

static struct file_operations tracing_fops = {
	.open = tracing_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = tracing_release,
};

static struct file_operations tracing_lt_fops = {
	.open = tracing_lt_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = tracing_release,
};

static struct file_operations show_traces_fops = {
	.open = show_traces_open,
	.read = seq_read,
	.release = seq_release,
};

static ssize_t
tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char *buf;
	int r = 0;
	int len = 0;
	int i;

	/* calculate max size */
	for (i = 0; trace_options[i]; i++) {
		len += strlen(trace_options[i]);
		len += 3; /* "no" and space */
	}

	/* +2 for \n and \0 */
	buf = kmalloc(len + 2, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; trace_options[i]; i++) {
		if (trace_flags & (1 << i))
			r += sprintf(buf + r, "%s ", trace_options[i]);
		else
			r += sprintf(buf + r, "no%s ", trace_options[i]);
	}

	r += sprintf(buf + r, "\n");
	WARN_ON(r >= len + 2);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    buf, r);

	kfree(buf);

	return r;
}

static ssize_t
tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp = buf;
	int neg = 0;
	int i;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	if (strncmp(buf, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	for (i = 0; trace_options[i]; i++) {
		int len = strlen(trace_options[i]);

		if (strncmp(cmp, trace_options[i], len) == 0) {
			if (neg)
				trace_flags &= ~(1 << i);
			else
				trace_flags |= (1 << i);
			break;
		}
	}

	filp->f_pos += cnt;

	return cnt;
}
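
/*
 * Usage sketch for iter_ctrl (illustrative; assumes debugfs is
 * mounted on /debug). All four flags start off cleared:
 *
 *	# cat /debug/tracing/iter_ctrl
 *	noprint-parent nosym-offset nosym-addr noverbose
 *	# echo print-parent > /debug/tracing/iter_ctrl
 *	# echo nosym-offset > /debug/tracing/iter_ctrl
 */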

static struct file_operations tracing_iter_fops = {
	.open = tracing_open_generic,
	.read = tracing_iter_ctrl_read,
	.write = tracing_iter_ctrl_write,
};

static ssize_t
tracing_ctrl_read(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = sprintf(buf, "%ld\n", tr->ctrl);
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       buf, r);
}

static ssize_t
tracing_ctrl_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	long val;
	char buf[64];

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	val = simple_strtoul(buf, NULL, 10);

	val = !!val;

	mutex_lock(&trace_types_lock);
	if (tr->ctrl ^ val) {
		if (val)
			tracer_enabled = 1;
		else
			tracer_enabled = 0;

		tr->ctrl = val;

		if (current_trace && current_trace->ctrl_update)
			current_trace->ctrl_update(tr);
	}
	mutex_unlock(&trace_types_lock);

	filp->f_pos += cnt;

	return cnt;
}

static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char buf[max_tracer_type_len+2];
	int r;

	mutex_lock(&trace_types_lock);
	if (current_trace)
		r = sprintf(buf, "%s\n", current_trace->name);
	else
		r = sprintf(buf, "\n");
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos,
				       buf, r);
}

static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	struct tracer *t;
	char buf[max_tracer_type_len+1];
	int i;

	if (cnt > max_tracer_type_len)
		cnt = max_tracer_type_len;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t || t == current_trace)
		goto out;

	if (current_trace && current_trace->reset)
		current_trace->reset(tr);

	current_trace = t;
	if (t->init)
		t->init(tr);

 out:
	mutex_unlock(&trace_types_lock);

	filp->f_pos += cnt;

	return cnt;
}

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, 64, "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > 64)
		r = 64;
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       buf, r);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	long *ptr = filp->private_data;
	long val;
	char buf[64];

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	val = simple_strtoul(buf, NULL, 10);

	*ptr = val * 1000;

	return cnt;
}

static struct file_operations tracing_max_lat_fops = {
	.open = tracing_open_generic,
	.read = tracing_max_lat_read,
	.write = tracing_max_lat_write,
};

static struct file_operations tracing_ctrl_fops = {
	.open = tracing_open_generic,
	.read = tracing_ctrl_read,
	.write = tracing_ctrl_write,
};

static struct file_operations set_tracer_fops = {
	.open = tracing_open_generic,
	.read = tracing_set_trace_read,
	.write = tracing_set_trace_write,
};

#ifdef CONFIG_DYNAMIC_FTRACE

static ssize_t
tracing_read_long(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	int r;

	r = sprintf(buf, "%ld\n", *p);
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       buf, r);
}

static struct file_operations tracing_read_long_fops = {
	.open = tracing_open_generic,
	.read = tracing_read_long,
};
#endif

static struct dentry *d_tracer;

struct dentry *tracing_init_dentry(void)
{
	static int once;

	if (d_tracer)
		return d_tracer;

	d_tracer = debugfs_create_dir("tracing", NULL);

	if (!d_tracer && !once) {
		once = 1;
		pr_warning("Could not create debugfs directory 'tracing'\n");
		return NULL;
	}

	return d_tracer;
}

static __init void tracer_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
				    &global_trace, &tracing_ctrl_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'tracing_enabled' entry\n");

	entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
				    NULL, &tracing_iter_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'iter_ctrl' entry\n");

	entry = debugfs_create_file("latency_trace", 0444, d_tracer,
				    &global_trace, &tracing_lt_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'latency_trace' entry\n");

	entry = debugfs_create_file("trace", 0444, d_tracer,
				    &global_trace, &tracing_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'trace' entry\n");

	entry = debugfs_create_file("available_tracers", 0444, d_tracer,
				    &global_trace, &show_traces_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'available_tracers' entry\n");

	entry = debugfs_create_file("current_tracer", 0444, d_tracer,
				    &global_trace, &set_tracer_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'current_tracer' entry\n");

	entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
				    &tracing_max_latency,
				    &tracing_max_lat_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'tracing_max_latency' entry\n");

	entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
				    &tracing_thresh, &tracing_max_lat_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'tracing_thresh' entry\n");

#ifdef CONFIG_DYNAMIC_FTRACE
	entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
				    &ftrace_update_tot_cnt,
				    &tracing_read_long_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'dyn_ftrace_total_info' entry\n");
#endif
}
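
/*
 * Usage sketch for the files created above (illustrative; assumes
 * debugfs is mounted on /debug):
 *
 *	# mount -t debugfs nodev /debug
 *	# cat /debug/tracing/available_tracers
 *	none
 *	# echo none > /debug/tracing/current_tracer
 *	# echo 1 > /debug/tracing/tracing_enabled
 *	# cat /debug/tracing/trace
 *	# cat /debug/tracing/latency_trace
 */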

/* dummy trace to disable tracing */
static struct tracer no_tracer __read_mostly =
{
	.name = "none",
};

static inline notrace int page_order(const unsigned long size)
{
	const unsigned long nr_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	return ilog2(roundup_pow_of_two(nr_pages));
}
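
/*
 * Worked example (the entry size is an assumption for illustration;
 * the real TRACE_ENTRY_SIZE depends on architecture and padding):
 * with the default 4096 entries and a 32-byte entry, the buffer needs
 * 4096 * 32 = 131072 bytes = 32 pages with 4K pages, so page_order()
 * returns ilog2(32) == 5.
 */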

__init static int tracer_alloc_buffers(void)
{
	const int order = page_order(trace_nr_entries * TRACE_ENTRY_SIZE);
	const unsigned long size = (1UL << order) << PAGE_SHIFT;
	struct trace_entry *array;
	int i;

	for_each_possible_cpu(i) {
		global_trace.data[i] = &per_cpu(global_trace_cpu, i);
		max_tr.data[i] = &per_cpu(max_data, i);

		array = (struct trace_entry *)
			  __get_free_pages(GFP_KERNEL, order);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate"
			       " %ld bytes for trace buffer!\n", size);
			goto free_buffers;
		}
		global_trace.data[i]->trace = array;

/* Only allocate if we are actually using the max trace */
#ifdef CONFIG_TRACER_MAX_TRACE
		array = (struct trace_entry *)
			  __get_free_pages(GFP_KERNEL, order);
		if (array == NULL) {
			printk(KERN_ERR "wakeup tracer: failed to allocate"
			       " %ld bytes for trace buffer!\n", size);
			goto free_buffers;
		}
		max_tr.data[i]->trace = array;
#endif
	}

	/*
	 * Since we allocate by orders of pages, we may be able to
	 * round up a bit.
	 */
	global_trace.entries = size / TRACE_ENTRY_SIZE;
	max_tr.entries = global_trace.entries;

	pr_info("tracer: %ld bytes allocated for %ld",
		size, trace_nr_entries);
	pr_info(" entries of %ld bytes\n", (long)TRACE_ENTRY_SIZE);
	pr_info("   actual entries %ld\n", global_trace.entries);

	tracer_init_debugfs();

	trace_init_cmdlines();

	register_tracer(&no_tracer);
	current_trace = &no_tracer;

	return 0;

 free_buffers:
	for (i-- ; i >= 0; i--) {
		struct trace_array_cpu *data = global_trace.data[i];

		if (data && data->trace) {
			free_pages((unsigned long)data->trace, order);
			data->trace = NULL;
		}

#ifdef CONFIG_TRACER_MAX_TRACE
		data = max_tr.data[i];
		if (data && data->trace) {
			free_pages((unsigned long)data->trace, order);
			data->trace = NULL;
		}
#endif
	}
	return -ENOMEM;
}

device_initcall(tracer_alloc_buffers);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
new file mode 100644
index 000000000000..3173a93561d4
--- /dev/null
+++ b/kernel/trace/trace.h
@@ -0,0 +1,184 @@
#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>

/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
	unsigned long ip;
	unsigned long parent_ip;
};

/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
	unsigned int prev_pid;
	unsigned char prev_prio;
	unsigned char prev_state;
	unsigned int next_pid;
	unsigned char next_prio;
};

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	char type;
	char cpu;
	char flags;
	char preempt_count;
	int pid;
	cycle_t t;
	unsigned long idx;
	union {
		struct ftrace_entry fn;
		struct ctx_switch_entry ctx;
	};
};

#define TRACE_ENTRY_SIZE	sizeof(struct trace_entry)

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data: (for example which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
	void *trace;
	unsigned long trace_idx;
	atomic_t disabled;
	atomic_t underrun;
	unsigned long saved_latency;
	unsigned long critical_start;
	unsigned long critical_end;
	unsigned long critical_sequence;
	unsigned long nice;
	unsigned long policy;
	unsigned long rt_priority;
	cycle_t preempt_timestamp;
	pid_t pid;
	uid_t uid;
	char comm[TASK_COMM_LEN];
};

struct trace_iterator;

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	unsigned long entries;
	long ctrl;
	int cpu;
	cycle_t time_start;
	struct trace_array_cpu *data[NR_CPUS];
};

/*
 * A specific tracer, represented by methods that operate on a trace array:
 */
struct tracer {
	const char *name;
	void (*init)(struct trace_array *tr);
	void (*reset)(struct trace_array *tr);
	void (*open)(struct trace_iterator *iter);
	void (*close)(struct trace_iterator *iter);
	void (*start)(struct trace_iterator *iter);
	void (*stop)(struct trace_iterator *iter);
	void (*ctrl_update)(struct trace_array *tr);
	struct tracer *next;
	int print_max;
};

/*
 * Trace iterator - used by printout routines that present trace
 * results to users and that might sleep, etc:
 */
struct trace_iterator {
	struct trace_array *tr;
	struct tracer *trace;
	struct trace_entry *ent;
	unsigned long iter_flags;
	loff_t pos;
	unsigned long next_idx[NR_CPUS];
	int cpu;
	int idx;
};

void notrace tracing_reset(struct trace_array_cpu *data);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags);
void tracing_record_cmdline(struct task_struct *tsk);

void tracing_start_function_trace(void);
void tracing_stop_function_trace(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

static inline notrace cycle_t now(int cpu)
{
	return cpu_clock(cpu);
}

#ifdef CONFIG_SCHED_TRACER
extern void notrace
wakeup_sched_switch(struct task_struct *prev, struct task_struct *next);
#else
static inline void
wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
{
}
#endif

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
			struct task_struct *prev,
			struct task_struct *next);

struct tracer_switch_ops {
	tracer_switch_func_t func;
	void *private;
	struct tracer_switch_ops *next;
};

extern int register_tracer_switch(struct tracer_switch_ops *ops);
extern int unregister_tracer_switch(struct tracer_switch_ops *ops);

#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#endif

#endif /* _LINUX_KERNEL_TRACE_H */