path: root/include/trace
author		Linus Torvalds <torvalds@linux-foundation.org>	2010-02-28 13:20:25 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-02-28 13:20:25 -0500
commit		6556a6743549defc32e5f90ee2cb1ecd833a44c3 (patch)
tree		622306583d4a3c13235a8bfc012854c125c597f1 /include/trace
parent		e0d272429a34ff143bfa04ee8e29dd4eed2964c7 (diff)
parent		1dd2980d990068e20045b90c424518cc7f3657ff (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (172 commits)
  perf_event, amd: Fix spinlock initialization
  perf_event: Fix preempt warning in perf_clock()
  perf tools: Flush maps on COMM events
  perf_events, x86: Split PMU definitions into separate files
  perf annotate: Handle samples not at objdump output addr boundaries
  perf_events, x86: Remove superflous MSR writes
  perf_events: Simplify code by removing cpu argument to hw_perf_group_sched_in()
  perf_events, x86: AMD event scheduling
  perf_events: Add new start/stop PMU callbacks
  perf_events: Report the MMAP pgoff value in bytes
  perf annotate: Defer allocating sym_priv->hist array
  perf symbols: Improve debugging information about symtab origins
  perf top: Use a macro instead of a constant variable
  perf symbols: Check the right return variable
  perf/scripts: Tag syscall_name helper as not yet available
  perf/scripts: Add perf-trace-python Documentation
  perf/scripts: Remove unnecessary PyTuple resizes
  perf/scripts: Add syscall tracing scripts
  perf/scripts: Add Python scripting engine
  perf/scripts: Remove check-perf-trace from listed scripts
  ...

Fix trivial conflict in tools/perf/util/probe-event.c
Diffstat (limited to 'include/trace')
-rw-r--r--	include/trace/events/lock.h	29
-rw-r--r--	include/trace/ftrace.h		60
-rw-r--r--	include/trace/syscall.h		 4
3 files changed, 34 insertions(+), 59 deletions(-)
diff --git a/include/trace/events/lock.h b/include/trace/events/lock.h
index a870ba125aa8..5c1dcfc16c60 100644
--- a/include/trace/events/lock.h
+++ b/include/trace/events/lock.h
@@ -20,14 +20,17 @@ TRACE_EVENT(lock_acquire,
 	TP_STRUCT__entry(
 		__field(unsigned int, flags)
 		__string(name, lock->name)
+		__field(void *, lockdep_addr)
 	),
 
 	TP_fast_assign(
 		__entry->flags = (trylock ? 1 : 0) | (read ? 2 : 0);
 		__assign_str(name, lock->name);
+		__entry->lockdep_addr = lock;
 	),
 
-	TP_printk("%s%s%s", (__entry->flags & 1) ? "try " : "",
+	TP_printk("%p %s%s%s", __entry->lockdep_addr,
+		  (__entry->flags & 1) ? "try " : "",
 		  (__entry->flags & 2) ? "read " : "",
 		  __get_str(name))
 );
@@ -40,13 +43,16 @@ TRACE_EVENT(lock_release,
 
 	TP_STRUCT__entry(
 		__string(name, lock->name)
+		__field(void *, lockdep_addr)
 	),
 
 	TP_fast_assign(
 		__assign_str(name, lock->name);
+		__entry->lockdep_addr = lock;
 	),
 
-	TP_printk("%s", __get_str(name))
+	TP_printk("%p %s",
+		  __entry->lockdep_addr, __get_str(name))
 );
 
 #ifdef CONFIG_LOCK_STAT
@@ -59,13 +65,16 @@ TRACE_EVENT(lock_contended,
 
 	TP_STRUCT__entry(
 		__string(name, lock->name)
+		__field(void *, lockdep_addr)
 	),
 
 	TP_fast_assign(
 		__assign_str(name, lock->name);
+		__entry->lockdep_addr = lock;
 	),
 
-	TP_printk("%s", __get_str(name))
+	TP_printk("%p %s",
+		  __entry->lockdep_addr, __get_str(name))
 );
 
 TRACE_EVENT(lock_acquired,
@@ -75,16 +84,18 @@ TRACE_EVENT(lock_acquired,
 
 	TP_STRUCT__entry(
 		__string(name, lock->name)
-		__field(unsigned long, wait_usec)
-		__field(unsigned long, wait_nsec_rem)
+		__field(s64, wait_nsec)
+		__field(void *, lockdep_addr)
 	),
+
 	TP_fast_assign(
 		__assign_str(name, lock->name);
-		__entry->wait_nsec_rem = do_div(waittime, NSEC_PER_USEC);
-		__entry->wait_usec = (unsigned long) waittime;
+		__entry->wait_nsec = waittime;
+		__entry->lockdep_addr = lock;
 	),
-	TP_printk("%s (%lu.%03lu us)", __get_str(name), __entry->wait_usec,
-		  __entry->wait_nsec_rem)
+	TP_printk("%p %s (%llu ns)", __entry->lockdep_addr,
+		  __get_str(name),
+		  __entry->wait_nsec)
 );
 
 #endif
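For reference, piecing the last hunk together gives roughly the post-merge definition of the lock_acquired event. This is a sketch assembled from the diff context above; the TP_PROTO/TP_ARGS lines are not part of the hunk and are assumed, not quoted from the file.

TRACE_EVENT(lock_acquired,
	/* assumed prototype: lockdep passes the map, caller ip and wait time */
	TP_PROTO(struct lockdep_map *lock, unsigned long ip, s64 waittime),

	TP_ARGS(lock, ip, waittime),

	TP_STRUCT__entry(
		__string(name, lock->name)
		__field(s64, wait_nsec)		/* raw wait time in ns, no do_div() split */
		__field(void *, lockdep_addr)	/* identifies the lock instance */
	),

	TP_fast_assign(
		__assign_str(name, lock->name);
		__entry->wait_nsec = waittime;
		__entry->lockdep_addr = lock;
	),
	TP_printk("%p %s (%llu ns)", __entry->lockdep_addr,
		  __get_str(name),
		  __entry->wait_nsec)
);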
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index f23a0ca6910a..0804cd594803 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -376,7 +376,7 @@ static inline notrace int ftrace_get_offsets_##call(		\
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
 
 /*
  * Generate the functions needed for tracepoint perf_event support.
@@ -421,7 +421,7 @@ ftrace_profile_disable_##name(struct ftrace_event_call *unused)	\
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#endif
+#endif /* CONFIG_PERF_EVENTS */
 
 /*
  * Stage 4 of the trace events.
@@ -505,7 +505,7 @@ ftrace_profile_disable_##name(struct ftrace_event_call *unused)	\
  *
  */
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
 
 #define _TRACE_PROFILE_INIT(call)					\
 	.profile_enable = ftrace_profile_enable_##call,			\
@@ -513,7 +513,7 @@ ftrace_profile_disable_##name(struct ftrace_event_call *unused)	\
 
 #else
 #define _TRACE_PROFILE_INIT(call)
-#endif
+#endif /* CONFIG_PERF_EVENTS */
 
 #undef __entry
 #define __entry entry
@@ -736,7 +736,7 @@ __attribute__((section("_ftrace_events"))) event_##call = {	\
  * }
  */
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
 
 #undef __entry
 #define __entry entry
@@ -761,22 +761,12 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call,	\
 		    proto)						\
 {									\
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
-	extern int perf_swevent_get_recursion_context(void);		\
-	extern void perf_swevent_put_recursion_context(int rctx);	\
-	extern void perf_tp_event(int, u64, u64, void *, int);		\
 	struct ftrace_raw_##call *entry;				\
 	u64 __addr = 0, __count = 1;					\
 	unsigned long irq_flags;					\
-	struct trace_entry *ent;					\
 	int __entry_size;						\
 	int __data_size;						\
-	char *trace_buf;						\
-	char *raw_data;							\
-	int __cpu;							\
 	int rctx;							\
-	int pc;								\
-									\
-	pc = preempt_count();						\
 									\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
@@ -786,42 +776,16 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call,	\
 	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,		\
 		      "profile buffer not large enough"))		\
 		return;							\
-									\
-	local_irq_save(irq_flags);					\
-									\
-	rctx = perf_swevent_get_recursion_context();			\
-	if (rctx < 0)							\
-		goto end_recursion;					\
-									\
-	__cpu = smp_processor_id();					\
-									\
-	if (in_nmi())							\
-		trace_buf = rcu_dereference(perf_trace_buf_nmi);	\
-	else								\
-		trace_buf = rcu_dereference(perf_trace_buf);		\
-									\
-	if (!trace_buf)							\
-		goto end;						\
-									\
-	raw_data = per_cpu_ptr(trace_buf, __cpu);			\
-									\
-	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
-	entry = (struct ftrace_raw_##call *)raw_data;			\
-	ent = &entry->ent;						\
-	tracing_generic_entry_update(ent, irq_flags, pc);		\
-	ent->type = event_call->id;					\
-									\
+	entry = (struct ftrace_raw_##call *)ftrace_perf_buf_prepare(	\
+		__entry_size, event_call->id, &rctx, &irq_flags);	\
+	if (!entry)							\
+		return;							\
 	tstruct								\
 									\
 	{ assign; }							\
 									\
-	perf_tp_event(event_call->id, __addr, __count, entry,		\
-		      __entry_size);					\
-									\
-end:									\
-	perf_swevent_put_recursion_context(rctx);			\
-end_recursion:								\
-	local_irq_restore(irq_flags);					\
 }
 
 #undef DEFINE_EVENT
@@ -838,7 +802,7 @@ static notrace void ftrace_profile_##call(proto)		\
 	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#endif /* CONFIG_EVENT_PROFILE */
+#endif /* CONFIG_PERF_EVENTS */
 
 #undef _TRACE_PROFILE_INIT
 
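The last two ftrace.h hunks replace the open-coded recursion guard, NMI buffer selection and trace_entry header setup with a prepare/submit pair. Written as plain C instead of the TRACE_EVENT macro body, the new fast path is roughly the sketch below; the helper prototypes are inferred from the call sites in the hunk rather than quoted from the kernel headers.

/* Sketch only: mirrors the simplified profile path above outside the macro.
 * ftrace_perf_buf_prepare()/ftrace_perf_buf_submit() argument order is taken
 * from the hunk, not from the headers. */
static void profile_one_event(struct ftrace_event_call *event_call,
			      int entry_size, u64 addr, u64 count)
{
	unsigned long irq_flags;
	void *entry;
	int rctx;

	/* Reserve a recursion-safe per-cpu buffer and fill in the common
	 * trace_entry header; NULL means no buffer is available. */
	entry = ftrace_perf_buf_prepare(entry_size, event_call->id,
					&rctx, &irq_flags);
	if (!entry)
		return;

	/* ... the macro's tstruct/assign blocks fill the event fields here ... */

	/* Hand the record to perf and release the recursion context. */
	ftrace_perf_buf_submit(entry, entry_size, rctx, addr,
			       count, irq_flags);
}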
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 8cd410254456..0387100752f0 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -45,12 +45,12 @@ ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s);
 enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags);
 enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags);
 #endif
-#ifdef CONFIG_EVENT_PROFILE
+
+#ifdef CONFIG_PERF_EVENTS
 int prof_sysenter_enable(struct ftrace_event_call *call);
 void prof_sysenter_disable(struct ftrace_event_call *call);
 int prof_sysexit_enable(struct ftrace_event_call *call);
 void prof_sysexit_disable(struct ftrace_event_call *call);
-
 #endif
 
 #endif /* _TRACE_SYSCALL_H */
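As a usage note, the prototypes above are only declared when CONFIG_PERF_EVENTS is set, so any caller has to sit under the same guard. A minimal, hypothetical caller is sketched below; the function name and the bool parameter are illustrative, only the guard and the prototypes come from the header above.

#ifdef CONFIG_PERF_EVENTS
/* Hypothetical caller: toggle the perf hook of a syscall-entry event.
 * "call" stands in for a real struct ftrace_event_call describing a
 * syscall tracepoint. */
static int example_toggle_sysenter_profiling(struct ftrace_event_call *call,
					     bool enable)
{
	if (enable)
		return prof_sysenter_enable(call);	/* start feeding perf */

	prof_sysenter_disable(call);			/* stop feeding perf */
	return 0;
}
#endif /* CONFIG_PERF_EVENTS */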