path: root/kernel/trace
author		Ingo Molnar <mingo@elte.hu>	2009-09-16 15:16:37 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-09-16 15:16:37 -0400
commit		40d9d82c8ab8c4e2373a23a1e31dc8d84c53aa01 (patch)
tree		610700282791b0469bbf09921993857e0b0b51fe /kernel/trace
parent		983f2163e7fdf11a15e05816de243f93f07eafca (diff)
parent		b36461da2a0389149d7f88f3cbc05a30d1db9faa (diff)
Merge branch 'tip/tracing/core4' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/core
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/Kconfig		28
-rw-r--r--	kernel/trace/ftrace.c		4
-rw-r--r--	kernel/trace/trace_clock.c	24
-rw-r--r--	kernel/trace/trace_entries.h	27
-rw-r--r--	kernel/trace/trace_events.c	14
-rw-r--r--	kernel/trace/trace_export.c	71
6 files changed, 104 insertions(+), 64 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 1ea0d1234f4a..e71634604400 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -11,12 +11,18 @@ config NOP_TRACER
 
 config HAVE_FTRACE_NMI_ENTER
 	bool
+	help
+	  See Documentation/trace/ftrace-implementation.txt
 
 config HAVE_FUNCTION_TRACER
 	bool
+	help
+	  See Documentation/trace/ftrace-implementation.txt
 
 config HAVE_FUNCTION_GRAPH_TRACER
 	bool
+	help
+	  See Documentation/trace/ftrace-implementation.txt
 
 config HAVE_FUNCTION_GRAPH_FP_TEST
 	bool
@@ -28,21 +34,25 @@ config HAVE_FUNCTION_GRAPH_FP_TEST
 config HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	bool
 	help
-	  This gets selected when the arch tests the function_trace_stop
-	  variable at the mcount call site. Otherwise, this variable
-	  is tested by the called function.
+	  See Documentation/trace/ftrace-implementation.txt
 
 config HAVE_DYNAMIC_FTRACE
 	bool
+	help
+	  See Documentation/trace/ftrace-implementation.txt
 
 config HAVE_FTRACE_MCOUNT_RECORD
 	bool
+	help
+	  See Documentation/trace/ftrace-implementation.txt
 
 config HAVE_HW_BRANCH_TRACER
 	bool
 
 config HAVE_SYSCALL_TRACEPOINTS
 	bool
+	help
+	  See Documentation/trace/ftrace-implementation.txt
 
 config TRACER_MAX_TRACE
 	bool
@@ -469,6 +479,18 @@ config FTRACE_STARTUP_TEST
 	  functioning properly. It will do tests on all the configured
 	  tracers of ftrace.
 
+config EVENT_TRACE_TEST_SYSCALLS
+	bool "Run selftest on syscall events"
+	depends on FTRACE_STARTUP_TEST
+	help
+	  This option will also enable testing every syscall event.
+	  It only enables the event and disables it and runs various loads
+	  with the event enabled. This adds a bit more time for kernel boot
+	  up since it runs this on every system call defined.
+
+	  TBD - enable a way to actually call the syscalls as we test their
+	  events
+
 config MMIOTRACE
 	bool "Memory mapped IO tracing"
 	depends on HAVE_MMIOTRACE_SUPPORT && PCI
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 8b23d5670088..f7ab7fc162cc 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2062,9 +2062,9 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 	int i, len = 0;
 	char *search;
 
-	if (glob && (strcmp(glob, "*") || !strlen(glob)))
+	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
 		glob = NULL;
-	else {
+	else if (glob) {
 		int not;
 
 		type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
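
The hunk above fixes an inverted strcmp() test: strcmp() returns 0 on a
match, so the old condition nulled out every glob *except* "*", and the
else branch could then call strlen() on a NULL pointer. A minimal
userspace sketch of the corrected logic (normalize_glob() is a
hypothetical helper for illustration, not the kernel code):

#include <stdio.h>
#include <string.h>

/* Map "match everything" globs ("*" or "") to NULL, keep real globs. */
static const char *normalize_glob(const char *glob)
{
	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
		return NULL;
	return glob;
}

int main(void)
{
	printf("%s\n", normalize_glob("*") ? "keep" : "match-all");       /* match-all */
	printf("%s\n", normalize_glob("sched_*") ? "keep" : "match-all"); /* keep */
	printf("%s\n", normalize_glob(NULL) ? "keep" : "match-all");      /* match-all */
	return 0;
}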
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index b588fd81f7f9..20c5f92e28a8 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -66,10 +66,14 @@ u64 notrace trace_clock(void)
  * Used by plugins that need globally coherent timestamps.
  */
 
-static u64 prev_trace_clock_time;
-
-static raw_spinlock_t trace_clock_lock ____cacheline_aligned_in_smp =
-	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+/* keep prev_time and lock in the same cacheline. */
+static struct {
+	u64 prev_time;
+	raw_spinlock_t lock;
+} trace_clock_struct ____cacheline_aligned_in_smp =
+	{
+		.lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED,
+	};
 
 u64 notrace trace_clock_global(void)
 {
@@ -88,19 +92,19 @@ u64 notrace trace_clock_global(void)
 	if (unlikely(in_nmi()))
 		goto out;
 
-	__raw_spin_lock(&trace_clock_lock);
+	__raw_spin_lock(&trace_clock_struct.lock);
 
 	/*
 	 * TODO: if this happens often then maybe we should reset
-	 * my_scd->clock to prev_trace_clock_time+1, to make sure
+	 * my_scd->clock to prev_time+1, to make sure
 	 * we start ticking with the local clock from now on?
 	 */
-	if ((s64)(now - prev_trace_clock_time) < 0)
-		now = prev_trace_clock_time + 1;
+	if ((s64)(now - trace_clock_struct.prev_time) < 0)
+		now = trace_clock_struct.prev_time + 1;
 
-	prev_trace_clock_time = now;
+	trace_clock_struct.prev_time = now;
 
-	__raw_spin_unlock(&trace_clock_lock);
+	__raw_spin_unlock(&trace_clock_struct.lock);
 
  out:
 	raw_local_irq_restore(flags);
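
The two hunks above replace a pair of file-scope variables with a single
struct so the timestamp and the lock that guards it share a cacheline,
and they clamp the returned time so it never moves backwards even when
per-CPU clocks drift. A userspace sketch of the same pattern, with a
pthread mutex standing in for the raw spinlock (the names and the
alignment attribute are assumptions for illustration):

#include <pthread.h>
#include <stdio.h>

/* Keep prev_time and its lock together, as the kernel struct does. */
static struct {
	unsigned long long prev_time;
	pthread_mutex_t lock;
} clock_state __attribute__((aligned(64))) = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

/* Serialize callers and never return a value behind the last one. */
static unsigned long long global_clock(unsigned long long now)
{
	pthread_mutex_lock(&clock_state.lock);

	if ((long long)(now - clock_state.prev_time) < 0)
		now = clock_state.prev_time + 1;	/* nudge forward */

	clock_state.prev_time = now;
	pthread_mutex_unlock(&clock_state.lock);
	return now;
}

int main(void)
{
	printf("%llu\n", global_clock(100));	/* 100 */
	printf("%llu\n", global_clock(90));	/* 101: clamped, not 90 */
	return 0;
}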
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index c866d34e0144..a431748ddd6e 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -78,7 +78,7 @@ FTRACE_ENTRY(funcgraph_entry, ftrace_graph_ent_entry,
 		__field_desc(	int,		graph_ent,	depth	)
 	),
 
-	F_printk("--> %lx (%d)", __entry->graph_ent.func, __entry->depth)
+	F_printk("--> %lx (%d)", __entry->func, __entry->depth)
 );
 
 /* Function return entry */
@@ -97,8 +97,8 @@ FTRACE_ENTRY(funcgraph_exit, ftrace_graph_ret_entry,
 
 	F_printk("<-- %lx (%d) (start: %llx end: %llx) over: %d",
 		 __entry->func, __entry->depth,
-		 __entry->calltime, __entry->rettim,
-		 __entrty->depth)
+		 __entry->calltime, __entry->rettime,
+		 __entry->depth)
 );
 
 /*
@@ -116,15 +116,6 @@ FTRACE_ENTRY(funcgraph_exit, ftrace_graph_ret_entry,
 	__field(	unsigned char,	next_state	)		\
 	__field(	unsigned int,	next_cpu	)
 
-#if 0
-FTRACE_ENTRY_STRUCT_ONLY(ctx_switch_entry,
-
-	F_STRUCT(
-		FTRACE_CTX_FIELDS
-	)
-);
-#endif
-
 FTRACE_ENTRY(context_switch, ctx_switch_entry,
 
 	TRACE_CTX,
@@ -133,7 +124,7 @@ FTRACE_ENTRY(context_switch, ctx_switch_entry,
 		FTRACE_CTX_FIELDS
 	),
 
-	F_printk(b"%u:%u:%u ==> %u:%u:%u [%03u]",
+	F_printk("%u:%u:%u ==> %u:%u:%u [%03u]",
 		 __entry->prev_pid, __entry->prev_prio, __entry->prev_state,
 		 __entry->next_pid, __entry->next_prio, __entry->next_state,
 		 __entry->next_cpu
@@ -257,8 +248,8 @@ FTRACE_ENTRY(mmiotrace_rw, trace_mmiotrace_rw,
 		__field_desc(	unsigned char,	rw,	width	)
 	),
 
-	F_printk("%lx %lx %lx %d %lx %lx",
-		 __entry->phs, __entry->value, __entry->pc,
+	F_printk("%lx %lx %lx %d %x %x",
+		 (unsigned long)__entry->phys, __entry->value, __entry->pc,
 		 __entry->map_id, __entry->opcode, __entry->width)
 );
 
@@ -275,8 +266,8 @@ FTRACE_ENTRY(mmiotrace_map, trace_mmiotrace_map,
 		__field_desc(	unsigned char,	map,	opcode	)
 	),
 
-	F_printk("%lx %lx %lx %d %lx",
-		 __entry->phs, __entry->virt, __entry->len,
+	F_printk("%lx %lx %lx %d %x",
+		 (unsigned long)__entry->phys, __entry->virt, __entry->len,
 		 __entry->map_id, __entry->opcode)
 );
 
@@ -370,7 +361,7 @@ FTRACE_ENTRY(kmem_alloc, kmemtrace_alloc_entry,
 	__field(	int,		node	)
 	),
 
-	F_printk("type:%u call_site:%lx ptr:%p req:%lu alloc:%lu"
+	F_printk("type:%u call_site:%lx ptr:%p req:%zi alloc:%zi"
 		 " flags:%x node:%d",
 		 __entry->type_id, __entry->call_site, __entry->ptr,
 		 __entry->bytes_req, __entry->bytes_alloc,
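
The F_printk() changes in the three hunks above are all format/argument
mismatches caught by printf-style checking: unsigned char arguments are
promoted to int in varargs, so they want %x rather than %lx; the phys
field's type (resource_size_t) can be wider than long, so the diff casts
it; and size_t fields take the %z length modifier. A small userspace
illustration (compile with -Wformat; the variable names are made up):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	unsigned char opcode = 0x2a;		/* promoted to int in varargs */
	size_t bytes_req = 128;			/* needs %zu, not %lu */
	unsigned long long phys = 0xfee00000ULL;

	printf("%x\n", opcode);			/* %lx here would warn */
	printf("%zu\n", bytes_req);		/* the diff uses %zi, the
						   signed variant */
	printf("%lx\n", (unsigned long)phys);	/* explicit cast, as above */
	return 0;
}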
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index adbed124c3e7..787f0fb0994e 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1154,7 +1154,7 @@ static int trace_module_notify(struct notifier_block *self,
 }
 #endif /* CONFIG_MODULES */
 
-struct notifier_block trace_module_nb = {
+static struct notifier_block trace_module_nb = {
 	.notifier_call = trace_module_notify,
 	.priority = 0,
 };
@@ -1326,6 +1326,18 @@ static __init void event_trace_self_tests(void)
 		if (!call->regfunc)
 			continue;
 
+/*
+ * Testing syscall events here is pretty useless, but
+ * we still do it if configured. But this is time consuming.
+ * What we really need is a user thread to perform the
+ * syscalls as we test.
+ */
+#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
+		if (call->system &&
+		    strcmp(call->system, "syscalls") == 0)
+			continue;
+#endif
+
 		pr_info("Testing event %s: ", call->name);
 
 		/*
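
The selftest hunk above skips events in the "syscalls" subsystem unless
CONFIG_EVENT_TRACE_TEST_SYSCALLS is set, since merely toggling those
events without a thread issuing syscalls proves little. A standalone
sketch of that filter (struct event and the sample data are
hypothetical, not the kernel's event list):

#include <stdio.h>
#include <string.h>

struct event {
	const char *system;
	const char *name;
};

/* Return 1 if the selftest should exercise this event. */
static int should_test(const struct event *ev, int test_syscalls)
{
	if (!test_syscalls && ev->system &&
	    strcmp(ev->system, "syscalls") == 0)
		return 0;
	return 1;
}

int main(void)
{
	struct event ev = { "syscalls", "sys_enter_read" };

	printf("%s\n", should_test(&ev, 0) ? "test" : "skip");	/* skip */
	printf("%s\n", should_test(&ev, 1) ? "test" : "skip");	/* test */
	return 0;
}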
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 4cb29d84d73a..9753fcc61bc5 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -23,6 +23,47 @@
 #define __field_struct(type, item)
 
 #undef __field
+#define __field(type, item)				type item;
+
+#undef __field_desc
+#define __field_desc(type, container, item)		type item;
+
+#undef __array
+#define __array(type, item, size)			type item[size];
+
+#undef __array_desc
+#define __array_desc(type, container, item, size)	type item[size];
+
+#undef __dynamic_array
+#define __dynamic_array(type, item)			type item[];
+
+#undef F_STRUCT
+#define F_STRUCT(args...)				args
+
+#undef F_printk
+#define F_printk(fmt, args...) fmt, args
+
+#undef FTRACE_ENTRY
+#define FTRACE_ENTRY(name, struct_name, id, tstruct, print)	\
+struct ____ftrace_##name {					\
+	tstruct							\
+};								\
+static void __used ____ftrace_check_##name(void)		\
+{								\
+	struct ____ftrace_##name *__entry = NULL;		\
+								\
+	/* force cmpile-time check on F_printk() */		\
+	printk(print);						\
+}
+
+#undef FTRACE_ENTRY_DUP
+#define FTRACE_ENTRY_DUP(name, struct_name, id, tstruct, print)	\
+	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))
+
+#include "trace_entries.h"
+
+
+#undef __field
 #define __field(type, item)					\
 	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
 			       "offset:%zu;\tsize:%zu;\n",		\
@@ -88,10 +129,6 @@ ftrace_format_##name(struct ftrace_event_call *unused, \
 	return ret;						\
 }
 
-#undef FTRACE_ENTRY_DUP
-#define FTRACE_ENTRY_DUP(name, struct_name, id, tstruct, print)	\
-	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))
-
 #include "trace_entries.h"
 
 
@@ -172,32 +209,6 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call) \
 #undef __dynamic_array
 #define __dynamic_array(type, item)
 
-
-#undef TRACE_ZERO_CHAR
-#define TRACE_ZERO_CHAR(arg)
-
-#undef TRACE_FIELD
-#define TRACE_FIELD(type, item, assign)\
-	entry->item = assign;
-
-#undef TRACE_FIELD
-#define TRACE_FIELD(type, item, assign)\
-	entry->item = assign;
-
-#undef TRACE_FIELD_SIGN
-#define TRACE_FIELD_SIGN(type, item, assign, is_signed)	\
-	TRACE_FIELD(type, item, assign)
-
-#undef TP_CMD
-#define TP_CMD(cmd...)	cmd
-
-#undef TRACE_ENTRY
-#define TRACE_ENTRY	entry
-
-#undef TRACE_FIELD_SPECIAL
-#define TRACE_FIELD_SPECIAL(type_item, item, len, cmd)	\
-	cmd;
-
 #undef FTRACE_ENTRY
 #define FTRACE_ENTRY(call, struct_name, type, tstruct, print)	\
 static int ftrace_raw_init_event_##call(void);			\
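
The first trace_export.c hunk above is the interesting one: for each
entry it expands the field list into a real struct and then passes the
F_printk() format and arguments to printk() inside a compiled-but-
never-called function, so the compiler's format checking validates every
F_printk() against the actual field types at build time. A stripped-down
userspace sketch of the same trick (the macro and struct names are
invented for this example):

#include <stdio.h>

/*
 * Expand `fields` into a struct, then feed `fmt`/`args` to printf() in
 * a function that is compiled (so -Wformat runs) but never executed.
 * __attribute__((used)) keeps the compiler from discarding it or
 * warning that it is unused.
 */
#define DEFINE_ENTRY_CHECK(name, fields, fmt, args...)		\
struct check_##name { fields };					\
static void __attribute__((used)) check_##name##_fmt(void)	\
{								\
	struct check_##name *__entry = NULL;			\
								\
	printf(fmt, args);	/* never runs; only type-checked */ \
}

/* A wrong specifier here (say %s for ip) triggers a -Wformat warning. */
DEFINE_ENTRY_CHECK(funcgraph_entry,
	unsigned long ip; int depth;,
	"--> %lx (%d)\n", __entry->ip, __entry->depth)

int main(void)
{
	return 0;
}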