author		Robert Richter <robert.richter@amd.com>	2010-04-23 10:47:51 -0400
committer	Robert Richter <robert.richter@amd.com>	2010-04-23 10:47:51 -0400
commit		b971f06187d83b5c03d2b597cccdfef421c0ca91 (patch)
tree		849dbe485ca9472bea002f94681882ce103fe3cd /include/trace
parent		cb6e943ccf19ab6d3189147e9d625a992e016084 (diff)
parent		c1ab9cab75098924fa8226a8a371de66977439df (diff)
Merge commit 'tip/tracing/core' into oprofile/core
Conflicts:
	drivers/oprofile/cpu_buffer.c

Signed-off-by: Robert Richter <robert.richter@amd.com>
Diffstat (limited to 'include/trace')
-rw-r--r--	include/trace/events/module.h	18
-rw-r--r--	include/trace/events/signal.h	52
-rw-r--r--	include/trace/ftrace.h		33
3 files changed, 52 insertions(+), 51 deletions(-)
diff --git a/include/trace/events/module.h b/include/trace/events/module.h
index 4b0f48ba16a6..c7bb2f0482fe 100644
--- a/include/trace/events/module.h
+++ b/include/trace/events/module.h
@@ -51,11 +51,14 @@ TRACE_EVENT(module_free,
 	TP_printk("%s", __get_str(name))
 );
 
+#ifdef CONFIG_MODULE_UNLOAD
+/* trace_module_get/put are only used if CONFIG_MODULE_UNLOAD is defined */
+
 DECLARE_EVENT_CLASS(module_refcnt,
 
-	TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
+	TP_PROTO(struct module *mod, unsigned long ip),
 
-	TP_ARGS(mod, ip, refcnt),
+	TP_ARGS(mod, ip),
 
 	TP_STRUCT__entry(
 		__field( unsigned long, ip )
@@ -65,7 +68,7 @@ DECLARE_EVENT_CLASS(module_refcnt,
 
 	TP_fast_assign(
 		__entry->ip = ip;
-		__entry->refcnt = refcnt;
+		__entry->refcnt = __this_cpu_read(mod->refptr->incs) + __this_cpu_read(mod->refptr->decs);
 		__assign_str(name, mod->name);
 	),
 
@@ -75,17 +78,18 @@ DECLARE_EVENT_CLASS(module_refcnt,
 
 DEFINE_EVENT(module_refcnt, module_get,
 
-	TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
+	TP_PROTO(struct module *mod, unsigned long ip),
 
-	TP_ARGS(mod, ip, refcnt)
+	TP_ARGS(mod, ip)
 );
 
 DEFINE_EVENT(module_refcnt, module_put,
 
-	TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
+	TP_PROTO(struct module *mod, unsigned long ip),
 
-	TP_ARGS(mod, ip, refcnt)
+	TP_ARGS(mod, ip)
 );
+#endif /* CONFIG_MODULE_UNLOAD */
 
 TRACE_EVENT(module_request,
 
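With the refcnt parameter removed from the module_refcnt event class, callers only pass the module and the caller address; the event reads the per-CPU incs/decs counters itself in TP_fast_assign. The sketch below is only an illustration of what a call site looks like after this change (the real call sites live in kernel/module.c; the wrapper name example_module_get is made up):

	static inline void example_module_get(struct module *mod)
	{
		if (mod) {
			preempt_disable();
			/* bump the per-CPU increment counter for this module */
			__this_cpu_inc(mod->refptr->incs);
			/* the tracepoint no longer takes a refcnt argument */
			trace_module_get(mod, _RET_IP_);
			preempt_enable();
		}
	}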
diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h
index a510b75ac304..814566c99d29 100644
--- a/include/trace/events/signal.h
+++ b/include/trace/events/signal.h
@@ -100,18 +100,7 @@ TRACE_EVENT(signal_deliver,
 		__entry->sa_handler, __entry->sa_flags)
 );
 
-/**
- * signal_overflow_fail - called when signal queue is overflow
- * @sig: signal number
- * @group: signal to process group or not (bool)
- * @info: pointer to struct siginfo
- *
- * Kernel fails to generate 'sig' signal with 'info' siginfo, because
- * siginfo queue is overflow, and the signal is dropped.
- * 'group' is not 0 if the signal will be sent to a process group.
- * 'sig' is always one of RT signals.
- */
-TRACE_EVENT(signal_overflow_fail,
+DECLARE_EVENT_CLASS(signal_queue_overflow,
 
 	TP_PROTO(int sig, int group, struct siginfo *info),
 
@@ -135,6 +124,24 @@ TRACE_EVENT(signal_overflow_fail,
 );
 
 /**
+ * signal_overflow_fail - called when signal queue is overflow
+ * @sig: signal number
+ * @group: signal to process group or not (bool)
+ * @info: pointer to struct siginfo
+ *
+ * Kernel fails to generate 'sig' signal with 'info' siginfo, because
+ * siginfo queue is overflow, and the signal is dropped.
+ * 'group' is not 0 if the signal will be sent to a process group.
+ * 'sig' is always one of RT signals.
+ */
+DEFINE_EVENT(signal_queue_overflow, signal_overflow_fail,
+
+	TP_PROTO(int sig, int group, struct siginfo *info),
+
+	TP_ARGS(sig, group, info)
+);
+
+/**
  * signal_lose_info - called when siginfo is lost
  * @sig: signal number
  * @group: signal to process group or not (bool)
@@ -145,28 +152,13 @@ TRACE_EVENT(signal_overflow_fail,
  * 'group' is not 0 if the signal will be sent to a process group.
  * 'sig' is always one of non-RT signals.
  */
-TRACE_EVENT(signal_lose_info,
+DEFINE_EVENT(signal_queue_overflow, signal_lose_info,
 
 	TP_PROTO(int sig, int group, struct siginfo *info),
 
-	TP_ARGS(sig, group, info),
-
-	TP_STRUCT__entry(
-		__field( int, sig )
-		__field( int, group )
-		__field( int, errno )
-		__field( int, code )
-	),
-
-	TP_fast_assign(
-		__entry->sig = sig;
-		__entry->group = group;
-		TP_STORE_SIGINFO(__entry, info);
-	),
-
-	TP_printk("sig=%d group=%d errno=%d code=%d",
-		__entry->sig, __entry->group, __entry->errno, __entry->code)
+	TP_ARGS(sig, group, info)
 );
+
 #endif /* _TRACE_SIGNAL_H */
 
 /* This part must be outside protection */
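The signal.h change folds two nearly identical TRACE_EVENT definitions into one DECLARE_EVENT_CLASS plus two DEFINE_EVENTs, so the TP_STRUCT__entry/TP_fast_assign/TP_printk bodies exist only once. A minimal sketch of the pattern, with made-up event and field names (only the macro usage mirrors the real header):

	DECLARE_EVENT_CLASS(example_class,

		TP_PROTO(int value),

		TP_ARGS(value),

		TP_STRUCT__entry(
			__field( int, value )
		),

		TP_fast_assign(
			__entry->value = value;
		),

		TP_printk("value=%d", __entry->value)
	);

	/* Each DEFINE_EVENT reuses the class template instead of duplicating it. */
	DEFINE_EVENT(example_class, example_event_foo,

		TP_PROTO(int value),

		TP_ARGS(value)
	);

	DEFINE_EVENT(example_class, example_event_bar,

		TP_PROTO(int value),

		TP_ARGS(value)
	);

Each DEFINE_EVENT still provides its own trace_<name>() call and its own entry under the trace events directory, but the probe code generated for the class is shared, which is what saves the duplicated lines here.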
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index ea6f9d4a20e9..75dd7787fb37 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -154,9 +154,11 @@
  *
  *	field = (typeof(field))entry;
  *
- *	p = get_cpu_var(ftrace_event_seq);
+ *	p = &get_cpu_var(ftrace_event_seq);
  *	trace_seq_init(p);
- *	ret = trace_seq_printf(s, <TP_printk> "\n");
+ *	ret = trace_seq_printf(s, "%s: ", <call>);
+ *	if (ret)
+ *		ret = trace_seq_printf(s, <TP_printk> "\n");
  *	put_cpu();
  *	if (!ret)
  *		return TRACE_TYPE_PARTIAL_LINE;
@@ -450,38 +452,38 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
  *
  * static void ftrace_raw_event_<call>(proto)
  * {
+ *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
  *	struct ring_buffer_event *event;
  *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
  *	struct ring_buffer *buffer;
  *	unsigned long irq_flags;
+ *	int __data_size;
  *	int pc;
  *
  *	local_save_flags(irq_flags);
  *	pc = preempt_count();
  *
+ *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
+ *
  *	event = trace_current_buffer_lock_reserve(&buffer,
  *				  event_<call>.id,
- *				  sizeof(struct ftrace_raw_<call>),
+ *				  sizeof(*entry) + __data_size,
  *				  irq_flags, pc);
  *	if (!event)
  *		return;
  *	entry = ring_buffer_event_data(event);
  *
- *	<assign>;  <-- Here we assign the entries by the __field and
+ *	{ <assign>; }  <-- Here we assign the entries by the __field and
  *			   __array macros.
  *
- *	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
+ *	if (!filter_current_check_discard(buffer, event_call, entry, event))
+ *		trace_current_buffer_unlock_commit(buffer,
+ *						   event, irq_flags, pc);
  * }
  *
  * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
  * {
- *	int ret;
- *
- *	ret = register_trace_<call>(ftrace_raw_event_<call>);
- *	if (!ret)
- *		pr_info("event trace: Could not activate trace point "
- *			"probe to <call>");
- *	return ret;
+ *	return register_trace_<call>(ftrace_raw_event_<call>);
  * }
  *
  * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
@@ -493,6 +495,8 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
  *	.trace = ftrace_raw_output_<call>, <-- stage 2
  * };
  *
+ * static const char print_fmt_<call>[] = <TP_printk>;
+ *
  * static struct ftrace_event_call __used
  * __attribute__((__aligned__(4)))
  * __attribute__((section("_ftrace_events"))) event_<call> = {
@@ -501,6 +505,8 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
  *	.raw_init = trace_event_raw_init,
  *	.regfunc = ftrace_reg_event_<call>,
  *	.unregfunc = ftrace_unreg_event_<call>,
+ *	.print_fmt = print_fmt_<call>,
+ *	.define_fields = ftrace_define_fields_<call>,
  * }
  *
  */
@@ -569,7 +575,6 @@ ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
 		return; \
 	entry = ring_buffer_event_data(event); \
 	\
-	\
 	tstruct \
 	\
 	{ assign; } \
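The ftrace.h hunks touch only the explanatory comments, bringing them in line with what the macros actually generate: the ring-buffer reservation accounts for dynamic data (sizeof(*entry) + __data_size), the entry is passed through filter_current_check_discard() before being committed, and the output stage prints the event name before the TP_printk format. A rough, hand-written sketch of that output step follows; the function and event names are illustrative, not the generated code:

	static enum print_line_t example_raw_output(struct trace_iterator *iter, int flags)
	{
		struct trace_seq *s = &iter->seq;
		int ret;

		/* print "<event name>: " first ... */
		ret = trace_seq_printf(s, "%s: ", "example_event");
		/* ... then the TP_printk body, but only if the name fit */
		if (ret)
			ret = trace_seq_printf(s, "value=%d\n", 42);

		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;

		return TRACE_TYPE_HANDLED;
	}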