author		Steven Rostedt <rostedt@goodmis.org>	2008-11-12 00:14:40 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-11-12 05:52:02 -0500
commit		52f232cb720a7babb752849cbc2cab2d24021209 (patch)
tree		47c7e800549457bd5ab9b54f47729acac6e10780 /kernel/trace
parent		1f0d69a9fc815db82f15722bf05227190b1d714d (diff)
tracing: likely/unlikely branch annotation tracer
Impact: new likely/unlikely branch tracer

This patch adds a way to record the instances of the likely() and
unlikely() branch condition annotations.

When "unlikely" is set in /debugfs/tracing/iter_ctrl the unlikely
conditions will be added to any of the ftrace tracers. The change
takes effect when a new tracer is passed into the current_tracer file.

For example:

 bash-3471  [003]   357.014755: [INCORRECT] sched_info_dequeued:sched_stats.h:177
 bash-3471  [003]   357.014756: [correct] update_curr:sched_fair.c:489
 bash-3471  [003]   357.014758: [correct] calc_delta_fair:sched_fair.c:411
 bash-3471  [003]   357.014759: [correct] account_group_exec_runtime:sched_stats.h:356
 bash-3471  [003]   357.014761: [correct] update_curr:sched_fair.c:489
 bash-3471  [003]   357.014763: [INCORRECT] calc_delta_fair:sched_fair.c:411
 bash-3471  [003]   357.014765: [correct] calc_delta_mine:sched.c:1279

Which shows the normal tracer heading, as well as whether the condition
was correct "[correct]" or was mistaken "[INCORRECT]", followed by the
function, file name and line number.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
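(Usage sketch, reconstructed from the paths named above and not part of the
commit: assuming debugfs is mounted at /debugfs, enabling boils down to
"echo unlikely > /debugfs/tracing/iter_ctrl" followed by echoing a tracer
name, e.g. "echo sched_switch > /debugfs/tracing/current_tracer"; the
sched_switch name is only an example, any compiled-in tracer should do,
since the option attaches to whatever tracer is selected next.)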
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/Kconfig		22
-rw-r--r--	kernel/trace/Makefile		6
-rw-r--r--	kernel/trace/trace.c		29
-rw-r--r--	kernel/trace/trace.h		39
-rw-r--r--	kernel/trace/trace_unlikely.c	114
5 files changed, 210 insertions, 0 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index a604f24c755f..8abcaf821beb 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -175,6 +175,28 @@ config TRACE_UNLIKELY_PROFILE
 
 	  Say N if unsure.
 
+config TRACING_UNLIKELY
+	bool
+	help
+	  Selected by tracers that will trace the likely and unlikely
+	  conditions. This prevents the tracers themselves from being
+	  profiled. Profiling the tracing infrastructure can only happen
+	  when the likelys and unlikelys are not being traced.
+
+config UNLIKELY_TRACER
+	bool "Trace likely/unlikely instances"
+	depends on TRACE_UNLIKELY_PROFILE
+	select TRACING_UNLIKELY
+	help
+	  This traces the events of likely and unlikely condition
+	  calls in the kernel. The difference between this and the
+	  "Trace likely/unlikely profiler" is that this is not a
+	  histogram of the callers, but actually places the calling
+	  events into a running trace buffer to see when and where the
+	  events happened, as well as their results.
+
+	  Say N if unsure.
+
 config STACK_TRACER
 	bool "Trace max stack"
 	depends on HAVE_FUNCTION_TRACER
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 98e70ee27986..c938d03516c0 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -10,6 +10,12 @@ CFLAGS_trace_selftest_dynamic.o = -pg
 obj-y += trace_selftest_dynamic.o
 endif
 
+# If unlikely tracing is enabled, do not trace these files
+ifdef CONFIG_TRACING_UNLIKELY
+KBUILD_CFLAGS += '-Dlikely(x)=likely_notrace(x)'
+KBUILD_CFLAGS += '-Dunlikely(x)=unlikely_notrace(x)'
+endif
+
 obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o
 obj-$(CONFIG_RING_BUFFER) += ring_buffer.o
 
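The Makefile override above only works because the parent commit keeps the
raw branch hints available under the *_notrace names, while the
instrumented likely()/unlikely() funnel every evaluation through
ftrace_likely_update(). A minimal sketch of that arrangement, reconstructed
from the signatures visible in this patch (the real definitions live in
include/linux/compiler.h in the parent commit, so treat the details as
illustrative):

	/* Illustrative reconstruction, not the verbatim compiler.h code. */
	#define likely_notrace(x)	__builtin_expect(!!(x), 1)
	#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

	/* Instrumented variant: record the outcome, then branch as usual. */
	#define likely(x) ({						\
		int ______r;						\
		static struct ftrace_likely_data ______f = {		\
			.func = __func__,				\
			.file = __FILE__,				\
			.line = __LINE__,				\
		};							\
		______r = likely_notrace(x);				\
		ftrace_likely_update(&______f, ______r, 1);		\
		______r;						\
	})

Files built with the -D overrides above therefore never call
ftrace_likely_update(), which is what keeps the tracer from recursing into
itself while it runs.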
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a3f7ae9cd8e1..83d38634bc90 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -258,6 +258,9 @@ static const char *trace_options[] = {
258 "sched-tree", 258 "sched-tree",
259 "ftrace_printk", 259 "ftrace_printk",
260 "ftrace_preempt", 260 "ftrace_preempt",
261#ifdef CONFIG_UNLIKELY_TRACER
262 "unlikely",
263#endif
261 NULL 264 NULL
262}; 265};
263 266
@@ -1648,6 +1651,18 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
 		trace_seq_print_cont(s, iter);
 		break;
 	}
+	case TRACE_UNLIKELY: {
+		struct trace_unlikely *field;
+
+		trace_assign_type(field, entry);
+
+		trace_seq_printf(s, "[%s] %s:%s:%d\n",
+				 field->correct ? "correct" : "INCORRECT",
+				 field->func,
+				 field->file,
+				 field->line);
+		break;
+	}
 	default:
 		trace_seq_printf(s, "Unknown type %d\n", entry->type);
 	}
@@ -1787,6 +1802,18 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 		return print_return_function(iter);
 		break;
 	}
+	case TRACE_UNLIKELY: {
+		struct trace_unlikely *field;
+
+		trace_assign_type(field, entry);
+
+		trace_seq_printf(s, "[%s] %s:%s:%d\n",
+				 field->correct ? "correct" : "INCORRECT",
+				 field->func,
+				 field->file,
+				 field->line);
+		break;
+	}
 	}
 	return TRACE_TYPE_HANDLED;
 }
@@ -2592,6 +2619,7 @@ static int tracing_set_tracer(char *buf)
 	if (t == current_trace)
 		goto out;
 
+	trace_unlikely_disable();
 	if (current_trace && current_trace->reset)
 		current_trace->reset(tr);
 
@@ -2599,6 +2627,7 @@ static int tracing_set_tracer(char *buf)
 	if (t->init)
 		t->init(tr);
 
+	trace_unlikely_enable(tr);
  out:
 	mutex_unlock(&trace_types_lock);
 
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index b5f91f198fd4..9635aa2c4fc1 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -22,6 +22,7 @@ enum trace_type {
 	TRACE_SPECIAL,
 	TRACE_MMIO_RW,
 	TRACE_MMIO_MAP,
+	TRACE_UNLIKELY,
 	TRACE_BOOT_CALL,
 	TRACE_BOOT_RET,
 	TRACE_FN_RET,
@@ -134,6 +135,16 @@ struct trace_boot_ret {
 	struct boot_trace_ret boot_ret;
 };
 
+#define TRACE_FUNC_SIZE 30
+#define TRACE_FILE_SIZE 20
+struct trace_unlikely {
+	struct trace_entry	ent;
+	unsigned		line;
+	char			func[TRACE_FUNC_SIZE+1];
+	char			file[TRACE_FILE_SIZE+1];
+	char			correct;
+};
+
 /*
  * trace_flag_type is an enumeration that holds different
  * states when a trace occurs. These are:
@@ -236,6 +247,7 @@ extern void __ftrace_bad_type(void);
 					  TRACE_MMIO_MAP);		\
 		IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
 		IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
+		IF_ASSIGN(var, ent, struct trace_unlikely, TRACE_UNLIKELY); \
 		IF_ASSIGN(var, ent, struct ftrace_ret_entry, TRACE_FN_RET);\
 		__ftrace_bad_type();					\
 	} while (0)
@@ -456,6 +468,9 @@ enum trace_iterator_flags {
 	TRACE_ITER_SCHED_TREE		= 0x200,
 	TRACE_ITER_PRINTK		= 0x400,
 	TRACE_ITER_PREEMPTONLY		= 0x800,
+#ifdef CONFIG_UNLIKELY_TRACER
+	TRACE_ITER_UNLIKELY		= 0x1000,
+#endif
 };
 
 /*
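A note on the 0x1000 value: iter_ctrl option strings are matched by their
index in trace_options[] and toggle the bit (1 << index) in trace_flags, so
TRACE_ITER_UNLIKELY has to line up with the position of the "unlikely"
string appended in trace.c above. A quick illustrative check of that
correspondence (userspace, not kernel code):

	#include <assert.h>

	enum { TRACE_ITER_UNLIKELY = 0x1000 };

	int main(void)
	{
		/* "unlikely" is the 13th string in trace_options[] (index 12) */
		assert(TRACE_ITER_UNLIKELY == (1 << 12));
		return 0;
	}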
@@ -515,4 +530,28 @@ static inline void ftrace_preempt_enable(int resched)
 	preempt_enable_notrace();
 }
 
+#ifdef CONFIG_UNLIKELY_TRACER
+extern int enable_unlikely_tracing(struct trace_array *tr);
+extern void disable_unlikely_tracing(void);
+static inline int trace_unlikely_enable(struct trace_array *tr)
+{
+	if (trace_flags & TRACE_ITER_UNLIKELY)
+		return enable_unlikely_tracing(tr);
+	return 0;
+}
+static inline void trace_unlikely_disable(void)
+{
+	/* due to races, always disable */
+	disable_unlikely_tracing();
+}
+#else
+static inline int trace_unlikely_enable(struct trace_array *tr)
+{
+	return 0;
+}
+static inline void trace_unlikely_disable(void)
+{
+}
+#endif /* CONFIG_UNLIKELY_TRACER */
+
 #endif /* _LINUX_KERNEL_TRACE_H */
diff --git a/kernel/trace/trace_unlikely.c b/kernel/trace/trace_unlikely.c
index 94932696069f..7290e0e7b4e3 100644
--- a/kernel/trace/trace_unlikely.c
+++ b/kernel/trace/trace_unlikely.c
@@ -15,8 +15,122 @@
 #include <asm/local.h>
 #include "trace.h"
 
+#ifdef CONFIG_UNLIKELY_TRACER
+
+static int unlikely_tracing_enabled __read_mostly;
+static DEFINE_MUTEX(unlikely_tracing_mutex);
+static struct trace_array *unlikely_tracer;
+
+static void
+probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
+{
+	struct trace_array *tr = unlikely_tracer;
+	struct ring_buffer_event *event;
+	struct trace_unlikely *entry;
+	unsigned long flags, irq_flags;
+	int cpu, pc;
+	const char *p;
+
+	/*
+	 * I would love to save just the ftrace_likely_data pointer, but
+	 * this code can also be used by modules. Ugly things can happen
+	 * if the module is unloaded, and then we go and read the
+	 * pointer. This is slower, but much safer.
+	 */
+
+	if (unlikely(!tr))
+		return;
+
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
+		goto out;
+
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+					 &irq_flags);
+	if (!event)
+		goto out;
+
+	pc = preempt_count();
+	entry = ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, flags, pc);
+	entry->ent.type = TRACE_UNLIKELY;
+
+	/* Strip off the path, only save the file */
+	p = f->file + strlen(f->file);
+	while (p >= f->file && *p != '/')
+		p--;
+	p++;
+
+	strncpy(entry->func, f->func, TRACE_FUNC_SIZE);
+	strncpy(entry->file, p, TRACE_FILE_SIZE);
+	entry->func[TRACE_FUNC_SIZE] = 0;
+	entry->file[TRACE_FILE_SIZE] = 0;
+	entry->line = f->line;
+	entry->correct = val == expect;
+
+	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+
+ out:
+	atomic_dec(&tr->data[cpu]->disabled);
+	local_irq_restore(flags);
+}
+
+static inline
+void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
+{
+	if (!unlikely_tracing_enabled)
+		return;
+
+	probe_likely_condition(f, val, expect);
+}
+
+int enable_unlikely_tracing(struct trace_array *tr)
+{
+	int ret = 0;
+
+	mutex_lock(&unlikely_tracing_mutex);
+	unlikely_tracer = tr;
+	/*
+	 * Must be seen before enabling. The reader is a condition
+	 * where we do not need a matching rmb()
+	 */
+	smp_wmb();
+	unlikely_tracing_enabled++;
+	mutex_unlock(&unlikely_tracing_mutex);
+
+	return ret;
+}
+
+void disable_unlikely_tracing(void)
+{
+	mutex_lock(&unlikely_tracing_mutex);
+
+	if (!unlikely_tracing_enabled)
+		goto out_unlock;
+
+	unlikely_tracing_enabled--;
+
+ out_unlock:
+	mutex_unlock(&unlikely_tracing_mutex);
+}
+#else
+static inline
+void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
+{
+}
+#endif /* CONFIG_UNLIKELY_TRACER */
+
 void ftrace_likely_update(struct ftrace_likely_data *f, int val, int expect)
 {
+	/*
+	 * I would love to have a trace point here instead, but the
+	 * trace point code is so inundated with unlikely and likely
+	 * conditions that the recursive nightmare that exists is too
+	 * much to try to get working. At least for now.
+	 */
+	trace_likely_condition(f, val, expect);
+
 	/* FIXME: Make this atomic! */
 	if (val == expect)
 		f->correct++;
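As an aside, the path-stripping loop in probe_likely_condition() is a
compact basename idiom: walk back from the terminating NUL until a '/' is
found (or the start of the string is passed), then step one forward. A
standalone userspace rendering of the same loop, illustrative only:

	#include <stdio.h>
	#include <string.h>

	/* Mirrors the path-stripping loop in probe_likely_condition(). */
	static const char *file_basename(const char *file)
	{
		const char *p = file + strlen(file);

		while (p >= file && *p != '/')
			p--;
		return p + 1;
	}

	int main(void)
	{
		/* prints "trace_unlikely.c" */
		printf("%s\n", file_basename("kernel/trace/trace_unlikely.c"));
		return 0;
	}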