author     Steven Rostedt <rostedt@goodmis.org>  2008-11-12 00:14:39 -0500
committer  Ingo Molnar <mingo@elte.hu>           2008-11-12 05:52:02 -0500
commit     1f0d69a9fc815db82f15722bf05227190b1d714d (patch)
tree       21e0a9664fd47827d9168938a965b39b85b6e287 /include/linux/compiler.h
parent     cb9382e5a94e54d0356d730954396c746ae66d6e (diff)
tracing: profile likely and unlikely annotations
Impact: new unlikely/likely profiler
Andrew Morton recently suggested having an in-kernel way to profile
likely and unlikely macros. This patch achieves that goal.
When configured, every(*) likely and unlikely macro gets a counter attached
to it. Each time the condition is evaluated, a hit or miss is recorded for
that annotation. These numbers can later be retrieved through:

  /debug/tracing/profile_likely   - All likely markers
  /debug/tracing/profile_unlikely - All unlikely markers
# cat /debug/tracing/profile_unlikely | head
 correct incorrect   %  Function                   File                    Line
 ------- ---------   -  --------                   ----                    ----
    2167         0   0  do_arch_prctl              process_64.c             832
       0         0   0  do_arch_prctl              process_64.c             804
    2670         0   0  IS_ERR                     err.h                     34
   71230      5693   7  __switch_to                process_64.c             673
   76919         0   0  __switch_to                process_64.c             639
   43184     33743  43  __switch_to                process_64.c             624
   12740     64181  83  __switch_to                process_64.c             594
   12740     64174  83  __switch_to                process_64.c             590
# cat /debug/tracing/profile_unlikely | \
  awk '{ if ($3 > 25) print $0; }' | head -20
   44963     35259  43  __switch_to                process_64.c             624
   12762     67454  84  __switch_to                process_64.c             594
   12762     67447  84  __switch_to                process_64.c             590
    1478       595  28  syscall_get_error          syscall.h                 51
       0      2821 100  syscall_trace_leave        ptrace.c                1567
       0         1 100  native_smp_prepare_cpus    smpboot.c               1237
   86338    265881  75  calc_delta_fair            sched_fair.c             408
  210410    108540  34  calc_delta_mine            sched.c                 1267
       0     54550 100  sched_info_queued          sched_stats.h            222
   51899     66435  56  pick_next_task_fair        sched_fair.c            1422
       6        10  62  yield_task_fair            sched_fair.c             982
    7325      2692  26  rt_policy                  sched.c                  144
       0      1270 100  pre_schedule_rt            sched_rt.c              1261
    1268     48073  97  pick_next_task_rt          sched_rt.c               884
       0     45181 100  sched_info_dequeued        sched_stats.h            177
       0        15 100  sched_move_task            sched.c                 8700
       0        15 100  sched_move_task            sched.c                 8690
   53167     33217  38  schedule                   sched.c                 4457
       0     80208 100  sched_info_switch          sched_stats.h            270
   30585     49631  61  context_switch             sched.c                 2619
# cat /debug/tracing/profile_likely | awk '{ if ($3 > 25) print $0; }'
   39900     36577  47  pick_next_task             sched.c                 4397
   20824     15233  42  switch_mm                  mmu_context_64.h          18
       0         7 100  __cancel_work_timer        workqueue.c              560
     617     66484  99  clocksource_adjust         timekeeping.c            456
       0    346340 100  audit_syscall_exit         auditsc.c               1570
      38    347350  99  audit_get_context          auditsc.c                732
       0    345244 100  audit_syscall_entry        auditsc.c               1541
      38      1017  96  audit_free                 auditsc.c               1446
       0      1090 100  audit_alloc                auditsc.c                862
    2618      1090  29  audit_alloc                auditsc.c                858
       0         6 100  move_masked_irq            migration.c                9
       1       198  99  probe_sched_wakeup         trace_sched_switch.c      58
       2         2  50  probe_wakeup               trace_sched_wakeup.c     227
       0         2 100  probe_wakeup_sched_switch  trace_sched_wakeup.c     144
    4514      2090  31  __grab_cache_page          filemap.c               2149
   12882    228786  94  mapping_unevictable        pagemap.h                 50
       4        11  73  __flush_cpu_slab           slub.c                  1466
  627757    330451  34  slab_free                  slub.c                  1731
    2959     61245  95  dentry_lru_del_init        dcache.c                 153
     946      1217  56  load_elf_binary            binfmt_elf.c             904
     102        82  44  disk_put_part              genhd.h                  206
       1         1  50  dst_gc_task                dst.c                     82
       0        19 100  tcp_mss_split_point        tcp_output.c            1126
As the output above shows, there is some work to do in rethinking the use
of certain unlikely and likely annotations. Note: the full unlikely listing
had 71 entries that were incorrect more than 25% of the time.
Note: after submitting the first version of this patch, Andrew Morton
showed me a version written by Daniel Walker, from which I picked up
the following ideas:
1) Using __builtin_constant_p to avoid profiling fixed values (see the
   sketch after this list).
2) Using __FILE__ instead of instruction pointers.
3) Using the preprocessor to stop all profiling of likely
annotations from vsyscall_64.c.
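To illustrate idea (1): __builtin_constant_p() evaluates to true for
compile-time constants, so a constant condition short-circuits to plain
!!(x) and never reaches the profiling path. A minimal userspace sketch of
the pattern (the my_* names are hypothetical stand-ins, not the kernel
macros):

  #include <stdio.h>

  static int profiled;  /* counts how many conditions hit the slow path */

  /* hypothetical stand-ins for likely_check()/likely() */
  #define my_check(x)  ({ profiled++; !!(x); })
  #define my_likely(x) (__builtin_constant_p(x) ? !!(x) : my_check(x))

  int main(int argc, char **argv)
  {
          int runtime = argc;     /* not a compile-time constant */

          if (my_likely(1))       /* constant: folded away, not profiled */
                  puts("constant path");
          if (my_likely(runtime)) /* runtime value: goes through my_check() */
                  puts("runtime path");
          printf("profiled conditions: %d\n", profiled);  /* prints 1 */
          return 0;
  }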
Thanks to Andrew Morton, Arjan van de Ven, Theodore Tso and Ingo Molnar
for their feedback on this patch.
(*) Not every unlikely is recorded; those used by the vsyscalls
(a few of them) had to have profiling disabled.
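The "# ifndef likely" guards in the compiler.h hunk below are what make
that opt-out work: a file that must not be profiled can pin the untraced
variants before any header gets a chance to define the profiled ones.
Roughly (the exact lines in vsyscall_64.c may differ):

  /* At the top of the vsyscall source file, before any #include: */
  #define likely(x)       likely_notrace(x)
  #define unlikely(x)     unlikely_notrace(x)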
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Theodore Tso <tytso@mit.edu>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/compiler.h')
 include/linux/compiler.h | 61 ++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 59 insertions(+), 2 deletions(-)
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 98115d9d04da..935e30cfaf3c 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -59,8 +59,65 @@ extern void __chk_io_ptr(const volatile void __iomem *);
  * specific implementations come from the above header files
  */
 
-#define likely(x)      __builtin_expect(!!(x), 1)
-#define unlikely(x)    __builtin_expect(!!(x), 0)
+#ifdef CONFIG_TRACE_UNLIKELY_PROFILE
+struct ftrace_likely_data {
+       const char *func;
+       const char *file;
+       unsigned line;
+       unsigned long correct;
+       unsigned long incorrect;
+};
+void ftrace_likely_update(struct ftrace_likely_data *f, int val, int expect);
+
+#define likely_notrace(x)      __builtin_expect(!!(x), 1)
+#define unlikely_notrace(x)    __builtin_expect(!!(x), 0)
+
+#define likely_check(x) ({                                             \
+               int ______r;                                            \
+               static struct ftrace_likely_data                        \
+                       __attribute__((__aligned__(4)))                 \
+                       __attribute__((section("_ftrace_likely")))      \
+                       ______f = {                                     \
+                               .func = __func__,                       \
+                               .file = __FILE__,                       \
+                               .line = __LINE__,                       \
+                       };                                              \
+               ______f.line = __LINE__;                                \
+               ______r = likely_notrace(x);                            \
+               ftrace_likely_update(&______f, ______r, 1);             \
+               ______r;                                                \
+       })
+#define unlikely_check(x) ({                                           \
+               int ______r;                                            \
+               static struct ftrace_likely_data                        \
+                       __attribute__((__aligned__(4)))                 \
+                       __attribute__((section("_ftrace_unlikely")))    \
+                       ______f = {                                     \
+                               .func = __func__,                       \
+                               .file = __FILE__,                       \
+                               .line = __LINE__,                       \
+                       };                                              \
+               ______f.line = __LINE__;                                \
+               ______r = unlikely_notrace(x);                          \
+               ftrace_likely_update(&______f, ______r, 0);             \
+               ______r;                                                \
+       })
+
+/*
+ * Using __builtin_constant_p(x) to ignore cases where the return
+ * value is always the same. This idea is taken from a similar patch
+ * written by Daniel Walker.
+ */
+# ifndef likely
+#  define likely(x)    (__builtin_constant_p(x) ? !!(x) : likely_check(x))
+# endif
+# ifndef unlikely
+#  define unlikely(x)  (__builtin_constant_p(x) ? !!(x) : unlikely_check(x))
+# endif
+#else
+# define likely(x)     __builtin_expect(!!(x), 1)
+# define unlikely(x)   __builtin_expect(!!(x), 0)
+#endif
 
 /* Optimization barrier */
 #ifndef barrier
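For reference, the update routine the new macros call into is essentially a
compare-and-bump against the per-annotation record declared in the hunk
above. A sketch of what ftrace_likely_update() has to do (its real
definition lives in the tracer, outside this file and this hunk):

  /*
   * Each evaluation compares the runtime truth value against what the
   * annotation predicted and bumps the matching counter. The plain
   * increments here are racy under concurrency; a real implementation
   * has to decide how much accuracy to trade for overhead.
   */
  void ftrace_likely_update(struct ftrace_likely_data *f, int val, int expect)
  {
          if (val == expect)
                  f->correct++;
          else
                  f->incorrect++;
  }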