diff options
author | Ingo Molnar <mingo@elte.hu> | 2009-09-07 02:19:51 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-09-07 02:19:51 -0400 |
commit | a1922ed661ab2c1637d0b10cde933bd9cd33d965 (patch) | |
tree | 0f1777542b385ebefd30b3586d830fd8ed6fda5b /kernel/trace/trace.h | |
parent | 75e33751ca8bbb72dd6f1a74d2810ddc8cbe4bdf (diff) | |
parent | d28daf923ac5e4a0d7cecebae56f3e339189366b (diff) |
Merge branch 'tracing/core' into tracing/hw-breakpoints
Conflicts:
arch/Kconfig
kernel/trace/trace.h
Merge reason: resolve the conflicts, plus adapt to the new
ring-buffer APIs.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/trace.h')
-rw-r--r-- | kernel/trace/trace.h | 87 |
1 file changed, 61 insertions, 26 deletions
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index ff1ef411a176..ea7e0bcbd539 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -38,8 +38,6 @@ enum trace_type { | |||
38 | TRACE_GRAPH_ENT, | 38 | TRACE_GRAPH_ENT, |
39 | TRACE_USER_STACK, | 39 | TRACE_USER_STACK, |
40 | TRACE_HW_BRANCHES, | 40 | TRACE_HW_BRANCHES, |
41 | TRACE_SYSCALL_ENTER, | ||
42 | TRACE_SYSCALL_EXIT, | ||
43 | TRACE_KMEM_ALLOC, | 41 | TRACE_KMEM_ALLOC, |
44 | TRACE_KMEM_FREE, | 42 | TRACE_KMEM_FREE, |
45 | TRACE_POWER, | 43 | TRACE_POWER, |
@@ -251,9 +249,6 @@ struct trace_array_cpu { | |||
251 | atomic_t disabled; | 249 | atomic_t disabled; |
252 | void *buffer_page; /* ring buffer spare */ | 250 | void *buffer_page; /* ring buffer spare */ |
253 | 251 | ||
254 | /* these fields get copied into max-trace: */ | ||
255 | unsigned long trace_idx; | ||
256 | unsigned long overrun; | ||
257 | unsigned long saved_latency; | 252 | unsigned long saved_latency; |
258 | unsigned long critical_start; | 253 | unsigned long critical_start; |
259 | unsigned long critical_end; | 254 | unsigned long critical_end; |
@@ -261,6 +256,7 @@ struct trace_array_cpu { | |||
261 | unsigned long nice; | 256 | unsigned long nice; |
262 | unsigned long policy; | 257 | unsigned long policy; |
263 | unsigned long rt_priority; | 258 | unsigned long rt_priority; |
259 | unsigned long skipped_entries; | ||
264 | cycle_t preempt_timestamp; | 260 | cycle_t preempt_timestamp; |
265 | pid_t pid; | 261 | pid_t pid; |
266 | uid_t uid; | 262 | uid_t uid; |
@@ -334,10 +330,6 @@ extern void __ftrace_bad_type(void); | |||
334 | TRACE_KMEM_ALLOC); \ | 330 | TRACE_KMEM_ALLOC); \ |
335 | IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \ | 331 | IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \ |
336 | TRACE_KMEM_FREE); \ | 332 | TRACE_KMEM_FREE); \ |
337 | IF_ASSIGN(var, ent, struct syscall_trace_enter, \ | ||
338 | TRACE_SYSCALL_ENTER); \ | ||
339 | IF_ASSIGN(var, ent, struct syscall_trace_exit, \ | ||
340 | TRACE_SYSCALL_EXIT); \ | ||
341 | IF_ASSIGN(var, ent, struct ksym_trace_entry, TRACE_KSYM);\ | 333 | IF_ASSIGN(var, ent, struct ksym_trace_entry, TRACE_KSYM);\ |
342 | __ftrace_bad_type(); \ | 334 | __ftrace_bad_type(); \ |
343 | } while (0) | 335 | } while (0) |
@@ -439,12 +431,13 @@ void init_tracer_sysprof_debugfs(struct dentry *d_tracer); | |||
439 | 431 | ||
440 | struct ring_buffer_event; | 432 | struct ring_buffer_event; |
441 | 433 | ||
442 | struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr, | 434 | struct ring_buffer_event * |
443 | int type, | 435 | trace_buffer_lock_reserve(struct ring_buffer *buffer, |
444 | unsigned long len, | 436 | int type, |
445 | unsigned long flags, | 437 | unsigned long len, |
446 | int pc); | 438 | unsigned long flags, |
447 | void trace_buffer_unlock_commit(struct trace_array *tr, | 439 | int pc); |
440 | void trace_buffer_unlock_commit(struct ring_buffer *buffer, | ||
448 | struct ring_buffer_event *event, | 441 | struct ring_buffer_event *event, |
449 | unsigned long flags, int pc); | 442 | unsigned long flags, int pc); |
450 | 443 | ||
@@ -454,10 +447,6 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr, | |||
454 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, | 447 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, |
455 | int *ent_cpu, u64 *ent_ts); | 448 | int *ent_cpu, u64 *ent_ts); |
456 | 449 | ||
457 | void tracing_generic_entry_update(struct trace_entry *entry, | ||
458 | unsigned long flags, | ||
459 | int pc); | ||
460 | |||
461 | void default_wait_pipe(struct trace_iterator *iter); | 450 | void default_wait_pipe(struct trace_iterator *iter); |
462 | void poll_wait_pipe(struct trace_iterator *iter); | 451 | void poll_wait_pipe(struct trace_iterator *iter); |
463 | 452 | ||
@@ -487,6 +476,7 @@ void trace_function(struct trace_array *tr, | |||
487 | 476 | ||
488 | void trace_graph_return(struct ftrace_graph_ret *trace); | 477 | void trace_graph_return(struct ftrace_graph_ret *trace); |
489 | int trace_graph_entry(struct ftrace_graph_ent *trace); | 478 | int trace_graph_entry(struct ftrace_graph_ent *trace); |
479 | void set_graph_array(struct trace_array *tr); | ||
490 | 480 | ||
491 | void tracing_start_cmdline_record(void); | 481 | void tracing_start_cmdline_record(void); |
492 | void tracing_stop_cmdline_record(void); | 482 | void tracing_stop_cmdline_record(void); |
@@ -498,16 +488,40 @@ void unregister_tracer(struct tracer *type); | |||
498 | 488 | ||
499 | extern unsigned long nsecs_to_usecs(unsigned long nsecs); | 489 | extern unsigned long nsecs_to_usecs(unsigned long nsecs); |
500 | 490 | ||
491 | #ifdef CONFIG_TRACER_MAX_TRACE | ||
501 | extern unsigned long tracing_max_latency; | 492 | extern unsigned long tracing_max_latency; |
502 | extern unsigned long tracing_thresh; | 493 | extern unsigned long tracing_thresh; |
503 | 494 | ||
504 | void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu); | 495 | void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu); |
505 | void update_max_tr_single(struct trace_array *tr, | 496 | void update_max_tr_single(struct trace_array *tr, |
506 | struct task_struct *tsk, int cpu); | 497 | struct task_struct *tsk, int cpu); |
498 | #endif /* CONFIG_TRACER_MAX_TRACE */ | ||
499 | |||
500 | #ifdef CONFIG_STACKTRACE | ||
501 | void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, | ||
502 | int skip, int pc); | ||
503 | |||
504 | void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, | ||
505 | int pc); | ||
507 | 506 | ||
508 | void __trace_stack(struct trace_array *tr, | 507 | void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, |
509 | unsigned long flags, | 508 | int pc); |
510 | int skip, int pc); | 509 | #else |
510 | static inline void ftrace_trace_stack(struct trace_array *tr, | ||
511 | unsigned long flags, int skip, int pc) | ||
512 | { | ||
513 | } | ||
514 | |||
515 | static inline void ftrace_trace_userstack(struct trace_array *tr, | ||
516 | unsigned long flags, int pc) | ||
517 | { | ||
518 | } | ||
519 | |||
520 | static inline void __trace_stack(struct trace_array *tr, unsigned long flags, | ||
521 | int skip, int pc) | ||
522 | { | ||
523 | } | ||
524 | #endif /* CONFIG_STACKTRACE */ | ||
511 | 525 | ||
512 | extern cycle_t ftrace_now(int cpu); | 526 | extern cycle_t ftrace_now(int cpu); |
513 | 527 | ||
@@ -533,6 +547,10 @@ extern unsigned long ftrace_update_tot_cnt; | |||
533 | extern int DYN_FTRACE_TEST_NAME(void); | 547 | extern int DYN_FTRACE_TEST_NAME(void); |
534 | #endif | 548 | #endif |
535 | 549 | ||
550 | extern int ring_buffer_expanded; | ||
551 | extern bool tracing_selftest_disabled; | ||
552 | DECLARE_PER_CPU(local_t, ftrace_cpu_disabled); | ||
553 | |||
536 | #ifdef CONFIG_FTRACE_STARTUP_TEST | 554 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
537 | extern int trace_selftest_startup_function(struct tracer *trace, | 555 | extern int trace_selftest_startup_function(struct tracer *trace, |
538 | struct trace_array *tr); | 556 | struct trace_array *tr); |
@@ -566,9 +584,16 @@ extern int | |||
566 | trace_vbprintk(unsigned long ip, const char *fmt, va_list args); | 584 | trace_vbprintk(unsigned long ip, const char *fmt, va_list args); |
567 | extern int | 585 | extern int |
568 | trace_vprintk(unsigned long ip, const char *fmt, va_list args); | 586 | trace_vprintk(unsigned long ip, const char *fmt, va_list args); |
587 | extern int | ||
588 | trace_array_vprintk(struct trace_array *tr, | ||
589 | unsigned long ip, const char *fmt, va_list args); | ||
590 | int trace_array_printk(struct trace_array *tr, | ||
591 | unsigned long ip, const char *fmt, ...); | ||
569 | 592 | ||
570 | extern unsigned long trace_flags; | 593 | extern unsigned long trace_flags; |
571 | 594 | ||
595 | extern int trace_clock_id; | ||
596 | |||
572 | /* Standard output formatting function used for function return traces */ | 597 | /* Standard output formatting function used for function return traces */ |
573 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 598 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
574 | extern enum print_line_t print_graph_function(struct trace_iterator *iter); | 599 | extern enum print_line_t print_graph_function(struct trace_iterator *iter); |
@@ -615,6 +640,7 @@ print_graph_function(struct trace_iterator *iter) | |||
615 | 640 | ||
616 | extern struct pid *ftrace_pid_trace; | 641 | extern struct pid *ftrace_pid_trace; |
617 | 642 | ||
643 | #ifdef CONFIG_FUNCTION_TRACER | ||
618 | static inline int ftrace_trace_task(struct task_struct *task) | 644 | static inline int ftrace_trace_task(struct task_struct *task) |
619 | { | 645 | { |
620 | if (!ftrace_pid_trace) | 646 | if (!ftrace_pid_trace) |
@@ -622,6 +648,12 @@ static inline int ftrace_trace_task(struct task_struct *task) | |||
622 | 648 | ||
623 | return test_tsk_trace_trace(task); | 649 | return test_tsk_trace_trace(task); |
624 | } | 650 | } |
651 | #else | ||
652 | static inline int ftrace_trace_task(struct task_struct *task) | ||
653 | { | ||
654 | return 1; | ||
655 | } | ||
656 | #endif | ||
625 | 657 | ||
626 | /* | 658 | /* |
627 | * trace_iterator_flags is an enumeration that defines bit | 659 | * trace_iterator_flags is an enumeration that defines bit |
@@ -650,9 +682,8 @@ enum trace_iterator_flags { | |||
650 | TRACE_ITER_PRINTK_MSGONLY = 0x10000, | 682 | TRACE_ITER_PRINTK_MSGONLY = 0x10000, |
651 | TRACE_ITER_CONTEXT_INFO = 0x20000, /* Print pid/cpu/time */ | 683 | TRACE_ITER_CONTEXT_INFO = 0x20000, /* Print pid/cpu/time */ |
652 | TRACE_ITER_LATENCY_FMT = 0x40000, | 684 | TRACE_ITER_LATENCY_FMT = 0x40000, |
653 | TRACE_ITER_GLOBAL_CLK = 0x80000, | 685 | TRACE_ITER_SLEEP_TIME = 0x80000, |
654 | TRACE_ITER_SLEEP_TIME = 0x100000, | 686 | TRACE_ITER_GRAPH_TIME = 0x100000, |
655 | TRACE_ITER_GRAPH_TIME = 0x200000, | ||
656 | }; | 687 | }; |
657 | 688 | ||
658 | /* | 689 | /* |
@@ -749,6 +780,7 @@ struct ftrace_event_field { | |||
749 | struct list_head link; | 780 | struct list_head link; |
750 | char *name; | 781 | char *name; |
751 | char *type; | 782 | char *type; |
783 | int filter_type; | ||
752 | int offset; | 784 | int offset; |
753 | int size; | 785 | int size; |
754 | int is_signed; | 786 | int is_signed; |
@@ -758,13 +790,15 @@ struct event_filter { | |||
758 | int n_preds; | 790 | int n_preds; |
759 | struct filter_pred **preds; | 791 | struct filter_pred **preds; |
760 | char *filter_string; | 792 | char *filter_string; |
793 | bool no_reset; | ||
761 | }; | 794 | }; |
762 | 795 | ||
763 | struct event_subsystem { | 796 | struct event_subsystem { |
764 | struct list_head list; | 797 | struct list_head list; |
765 | const char *name; | 798 | const char *name; |
766 | struct dentry *entry; | 799 | struct dentry *entry; |
767 | void *filter; | 800 | struct event_filter *filter; |
801 | int nr_events; | ||
768 | }; | 802 | }; |
769 | 803 | ||
770 | struct filter_pred; | 804 | struct filter_pred; |
@@ -792,6 +826,7 @@ extern int apply_subsystem_event_filter(struct event_subsystem *system, | |||
792 | char *filter_string); | 826 | char *filter_string); |
793 | extern void print_subsystem_event_filter(struct event_subsystem *system, | 827 | extern void print_subsystem_event_filter(struct event_subsystem *system, |
794 | struct trace_seq *s); | 828 | struct trace_seq *s); |
829 | extern int filter_assign_type(const char *type); | ||
795 | 830 | ||
796 | static inline int | 831 | static inline int |
797 | filter_check_discard(struct ftrace_event_call *call, void *rec, | 832 | filter_check_discard(struct ftrace_event_call *call, void *rec, |