Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Kconfig                   115
-rw-r--r--  kernel/trace/Makefile                    9
-rw-r--r--  kernel/trace/ftrace.c                  929
-rw-r--r--  kernel/trace/ring_buffer.c             709
-rw-r--r--  kernel/trace/trace.c                   976
-rw-r--r--  kernel/trace/trace.h                   265
-rw-r--r--  kernel/trace/trace_boot.c              158
-rw-r--r--  kernel/trace/trace_branch.c            342
-rw-r--r--  kernel/trace/trace_functions.c          30
-rw-r--r--  kernel/trace/trace_functions_graph.c   669
-rw-r--r--  kernel/trace/trace_hw_branches.c       195
-rw-r--r--  kernel/trace/trace_irqsoff.c            61
-rw-r--r--  kernel/trace/trace_mmiotrace.c          33
-rw-r--r--  kernel/trace/trace_nop.c                65
-rw-r--r--  kernel/trace/trace_power.c             179
-rw-r--r--  kernel/trace/trace_sched_switch.c      121
-rw-r--r--  kernel/trace/trace_sched_wakeup.c       72
-rw-r--r--  kernel/trace/trace_selftest.c          173
-rw-r--r--  kernel/trace/trace_stack.c              70
-rw-r--r--  kernel/trace/trace_sysprof.c            31
20 files changed, 4400 insertions, 802 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 33dbefd471e8..e2a4ff6fc3a6 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -3,18 +3,34 @@
3# select HAVE_FUNCTION_TRACER: 3# select HAVE_FUNCTION_TRACER:
4# 4#
5 5
6config USER_STACKTRACE_SUPPORT
7 bool
8
6config NOP_TRACER 9config NOP_TRACER
7 bool 10 bool
8 11
9config HAVE_FUNCTION_TRACER 12config HAVE_FUNCTION_TRACER
10 bool 13 bool
11 14
15config HAVE_FUNCTION_GRAPH_TRACER
16 bool
17
18config HAVE_FUNCTION_TRACE_MCOUNT_TEST
19 bool
20 help
21 This gets selected when the arch tests the function_trace_stop
22 variable at the mcount call site. Otherwise, this variable
23 is tested by the called function.
24
12config HAVE_DYNAMIC_FTRACE 25config HAVE_DYNAMIC_FTRACE
13 bool 26 bool
14 27
15config HAVE_FTRACE_MCOUNT_RECORD 28config HAVE_FTRACE_MCOUNT_RECORD
16 bool 29 bool
17 30
31config HAVE_HW_BRANCH_TRACER
32 bool
33
18config TRACER_MAX_TRACE 34config TRACER_MAX_TRACE
19 bool 35 bool
20 36
@@ -47,6 +63,20 @@ config FUNCTION_TRACER
47 (the bootup default), then the overhead of the instructions is very 63 (the bootup default), then the overhead of the instructions is very
48 small and not measurable even in micro-benchmarks. 64 small and not measurable even in micro-benchmarks.
49 65
66config FUNCTION_GRAPH_TRACER
67 bool "Kernel Function Graph Tracer"
68 depends on HAVE_FUNCTION_GRAPH_TRACER
69 depends on FUNCTION_TRACER
70 default y
71 help
72 Enable the kernel to trace a function at both its return
73 and its entry.
74	  Its first purpose is to trace the duration of functions and
75	  draw a call graph for each thread with some information like
76	  the return value.
77 This is done by setting the current return address on the current
78 task structure into a stack of calls.
79
50config IRQSOFF_TRACER 80config IRQSOFF_TRACER
51 bool "Interrupts-off Latency Tracer" 81 bool "Interrupts-off Latency Tracer"
52 default n 82 default n
@@ -138,6 +168,70 @@ config BOOT_TRACER
138 selected, because the self-tests are an initcall as well and that 168 selected, because the self-tests are an initcall as well and that
139 would invalidate the boot trace. ) 169 would invalidate the boot trace. )
140 170
171config TRACE_BRANCH_PROFILING
172 bool "Trace likely/unlikely profiler"
173 depends on DEBUG_KERNEL
174 select TRACING
175 help
176	  This tracer profiles all the likely and unlikely macros
177 in the kernel. It will display the results in:
178
179 /debugfs/tracing/profile_annotated_branch
180
181	  Note: this will add a significant overhead; only turn this
182 on if you need to profile the system's use of these macros.
183
184 Say N if unsure.
185
186config PROFILE_ALL_BRANCHES
187 bool "Profile all if conditionals"
188 depends on TRACE_BRANCH_PROFILING
189 help
190 This tracer profiles all branch conditions. Every if ()
191	  taken in the kernel is recorded, whether it was a hit or a miss.
192 The results will be displayed in:
193
194 /debugfs/tracing/profile_branch
195
196 This configuration, when enabled, will impose a great overhead
197 on the system. This should only be enabled when the system
198	  is to be analyzed.
199
200 Say N if unsure.
201
202config TRACING_BRANCHES
203 bool
204 help
205 Selected by tracers that will trace the likely and unlikely
206 conditions. This prevents the tracers themselves from being
207 profiled. Profiling the tracing infrastructure can only happen
208 when the likelys and unlikelys are not being traced.
209
210config BRANCH_TRACER
211 bool "Trace likely/unlikely instances"
212 depends on TRACE_BRANCH_PROFILING
213 select TRACING_BRANCHES
214 help
215 This traces the events of likely and unlikely condition
216 calls in the kernel. The difference between this and the
217 "Trace likely/unlikely profiler" is that this is not a
218 histogram of the callers, but actually places the calling
219 events into a running trace buffer to see when and where the
220 events happened, as well as their results.
221
222 Say N if unsure.
223
224config POWER_TRACER
225 bool "Trace power consumption behavior"
226 depends on DEBUG_KERNEL
227 depends on X86
228 select TRACING
229 help
230	  This tracer helps developers to analyze and optimize the kernel's
231 power management decisions, specifically the C-state and P-state
232 behavior.
233
234
141config STACK_TRACER 235config STACK_TRACER
142 bool "Trace max stack" 236 bool "Trace max stack"
143 depends on HAVE_FUNCTION_TRACER 237 depends on HAVE_FUNCTION_TRACER
@@ -150,13 +244,26 @@ config STACK_TRACER
150 244
151 This tracer works by hooking into every function call that the 245 This tracer works by hooking into every function call that the
152 kernel executes, and keeping a maximum stack depth value and 246 kernel executes, and keeping a maximum stack depth value and
153 stack-trace saved. Because this logic has to execute in every 247 stack-trace saved. If this is configured with DYNAMIC_FTRACE
154 kernel function, all the time, this option can slow down the 248 then it will not have any overhead while the stack tracer
155 kernel measurably and is generally intended for kernel 249 is disabled.
156 developers only. 250
251 To enable the stack tracer on bootup, pass in 'stacktrace'
252 on the kernel command line.
253
254 The stack tracer can also be enabled or disabled via the
255 sysctl kernel.stack_tracer_enabled
157 256
158 Say N if unsure. 257 Say N if unsure.
159 258
259config HW_BRANCH_TRACER
260 depends on HAVE_HW_BRANCH_TRACER
261 bool "Trace hw branches"
262 select TRACING
263 help
264 This tracer records all branches on the system in a circular
265 buffer giving access to the last N branches for each cpu.
266
160config DYNAMIC_FTRACE 267config DYNAMIC_FTRACE
161 bool "enable/disable ftrace tracepoints dynamically" 268 bool "enable/disable ftrace tracepoints dynamically"
162 depends on FUNCTION_TRACER 269 depends on FUNCTION_TRACER
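
[Editor's note: for context, a minimal sketch of a consumer of the new entry/return
hooks that FUNCTION_GRAPH_TRACER's help text describes, using the
register_ftrace_graph() API added in the ftrace.c part of this patch further down.
The struct layouts (ftrace_graph_ent/ftrace_graph_ret and their fields) follow this
series' include/linux/ftrace.h and are assumptions here, not shown in this diff.]

#include <linux/ftrace.h>
#include <linux/init.h>

/* Called on function entry; returning 0 drops this call, nonzero keeps the
 * return hook armed (assumed semantics of this series). */
static int example_graph_entry(struct ftrace_graph_ent *trace)
{
        return 1;
}

/* Called when the traced function returns. */
static void example_graph_return(struct ftrace_graph_ret *trace)
{
        /* e.g. trace->rettime - trace->calltime would be the duration
         * the help text above refers to (field names assumed). */
}

static int __init example_graph_init(void)
{
        return register_ftrace_graph(example_graph_return,
                                     example_graph_entry);
}
device_initcall(example_graph_init);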
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index c8228b1a49e9..349d5a93653f 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -10,6 +10,11 @@ CFLAGS_trace_selftest_dynamic.o = -pg
10obj-y += trace_selftest_dynamic.o 10obj-y += trace_selftest_dynamic.o
11endif 11endif
12 12
13# If unlikely tracing is enabled, do not trace these files
14ifdef CONFIG_TRACING_BRANCHES
15KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
16endif
17
13obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o 18obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o
14obj-$(CONFIG_RING_BUFFER) += ring_buffer.o 19obj-$(CONFIG_RING_BUFFER) += ring_buffer.o
15 20
@@ -24,5 +29,9 @@ obj-$(CONFIG_NOP_TRACER) += trace_nop.o
24obj-$(CONFIG_STACK_TRACER) += trace_stack.o 29obj-$(CONFIG_STACK_TRACER) += trace_stack.o
25obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o 30obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
26obj-$(CONFIG_BOOT_TRACER) += trace_boot.o 31obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
32obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
33obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
34obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o
35obj-$(CONFIG_POWER_TRACER) += trace_power.o
27 36
28libftrace-y := ftrace.o 37libftrace-y := ftrace.o
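
[Editor's note: a rough sketch of why the Makefile defines DISABLE_BRANCH_PROFILING
for these objects. The real likely()/unlikely() instrumentation lives in
include/linux/compiler.h, outside this diff, and keeps per-site static hit/miss
bookkeeping; branch_profile_hit() below is a hypothetical stand-in for it. The point
is only that the define restores the plain __builtin_expect() forms so the tracer
does not profile its own branches.]

#if defined(CONFIG_TRACE_BRANCH_PROFILING) && !defined(DISABLE_BRANCH_PROFILING)
# define likely(x)   ({ int _r = !!(x); branch_profile_hit(_r, 1); __builtin_expect(_r, 1); })
# define unlikely(x) ({ int _r = !!(x); branch_profile_hit(_r, 0); __builtin_expect(_r, 0); })
#else
# define likely(x)   __builtin_expect(!!(x), 1)
# define unlikely(x) __builtin_expect(!!(x), 0)
#endif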
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 78db083390f0..2f32969c09df 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -47,6 +47,13 @@
47int ftrace_enabled __read_mostly; 47int ftrace_enabled __read_mostly;
48static int last_ftrace_enabled; 48static int last_ftrace_enabled;
49 49
50/* set when tracing only a pid */
51struct pid *ftrace_pid_trace;
52static struct pid * const ftrace_swapper_pid = &init_struct_pid;
53
54/* Quick disabling of function tracer. */
55int function_trace_stop;
56
50/* 57/*
51 * ftrace_disabled is set when an anomaly is discovered. 58 * ftrace_disabled is set when an anomaly is discovered.
52 * ftrace_disabled is much stronger than ftrace_enabled. 59 * ftrace_disabled is much stronger than ftrace_enabled.
@@ -55,6 +62,7 @@ static int ftrace_disabled __read_mostly;
55 62
56static DEFINE_SPINLOCK(ftrace_lock); 63static DEFINE_SPINLOCK(ftrace_lock);
57static DEFINE_MUTEX(ftrace_sysctl_lock); 64static DEFINE_MUTEX(ftrace_sysctl_lock);
65static DEFINE_MUTEX(ftrace_start_lock);
58 66
59static struct ftrace_ops ftrace_list_end __read_mostly = 67static struct ftrace_ops ftrace_list_end __read_mostly =
60{ 68{
@@ -63,6 +71,8 @@ static struct ftrace_ops ftrace_list_end __read_mostly =
63 71
64static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end; 72static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
65ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; 73ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
74ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
75ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
66 76
67static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) 77static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
68{ 78{
@@ -79,6 +89,21 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
79 }; 89 };
80} 90}
81 91
92static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
93{
94 if (!test_tsk_trace_trace(current))
95 return;
96
97 ftrace_pid_function(ip, parent_ip);
98}
99
100static void set_ftrace_pid_function(ftrace_func_t func)
101{
102 /* do not set ftrace_pid_function to itself! */
103 if (func != ftrace_pid_func)
104 ftrace_pid_function = func;
105}
106
82/** 107/**
83 * clear_ftrace_function - reset the ftrace function 108 * clear_ftrace_function - reset the ftrace function
84 * 109 *
@@ -88,7 +113,23 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
88void clear_ftrace_function(void) 113void clear_ftrace_function(void)
89{ 114{
90 ftrace_trace_function = ftrace_stub; 115 ftrace_trace_function = ftrace_stub;
116 __ftrace_trace_function = ftrace_stub;
117 ftrace_pid_function = ftrace_stub;
118}
119
120#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
121/*
122 * For those archs that do not test ftrace_trace_stop in their
123 * mcount call site, we need to do it from C.
124 */
125static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
126{
127 if (function_trace_stop)
128 return;
129
130 __ftrace_trace_function(ip, parent_ip);
91} 131}
132#endif
92 133
93static int __register_ftrace_function(struct ftrace_ops *ops) 134static int __register_ftrace_function(struct ftrace_ops *ops)
94{ 135{
@@ -106,14 +147,28 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
106 ftrace_list = ops; 147 ftrace_list = ops;
107 148
108 if (ftrace_enabled) { 149 if (ftrace_enabled) {
150 ftrace_func_t func;
151
152 if (ops->next == &ftrace_list_end)
153 func = ops->func;
154 else
155 func = ftrace_list_func;
156
157 if (ftrace_pid_trace) {
158 set_ftrace_pid_function(func);
159 func = ftrace_pid_func;
160 }
161
109 /* 162 /*
110 * For one func, simply call it directly. 163 * For one func, simply call it directly.
111 * For more than one func, call the chain. 164 * For more than one func, call the chain.
112 */ 165 */
113 if (ops->next == &ftrace_list_end) 166#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
114 ftrace_trace_function = ops->func; 167 ftrace_trace_function = func;
115 else 168#else
116 ftrace_trace_function = ftrace_list_func; 169 __ftrace_trace_function = func;
170 ftrace_trace_function = ftrace_test_stop_func;
171#endif
117 } 172 }
118 173
119 spin_unlock(&ftrace_lock); 174 spin_unlock(&ftrace_lock);
@@ -152,9 +207,19 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
152 207
153 if (ftrace_enabled) { 208 if (ftrace_enabled) {
154 /* If we only have one func left, then call that directly */ 209 /* If we only have one func left, then call that directly */
155 if (ftrace_list == &ftrace_list_end || 210 if (ftrace_list->next == &ftrace_list_end) {
156 ftrace_list->next == &ftrace_list_end) 211 ftrace_func_t func = ftrace_list->func;
157 ftrace_trace_function = ftrace_list->func; 212
213 if (ftrace_pid_trace) {
214 set_ftrace_pid_function(func);
215 func = ftrace_pid_func;
216 }
217#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
218 ftrace_trace_function = func;
219#else
220 __ftrace_trace_function = func;
221#endif
222 }
158 } 223 }
159 224
160 out: 225 out:
@@ -163,6 +228,36 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
163 return ret; 228 return ret;
164} 229}
165 230
231static void ftrace_update_pid_func(void)
232{
233 ftrace_func_t func;
234
235 /* should not be called from interrupt context */
236 spin_lock(&ftrace_lock);
237
238 if (ftrace_trace_function == ftrace_stub)
239 goto out;
240
241 func = ftrace_trace_function;
242
243 if (ftrace_pid_trace) {
244 set_ftrace_pid_function(func);
245 func = ftrace_pid_func;
246 } else {
247 if (func == ftrace_pid_func)
248 func = ftrace_pid_function;
249 }
250
251#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
252 ftrace_trace_function = func;
253#else
254 __ftrace_trace_function = func;
255#endif
256
257 out:
258 spin_unlock(&ftrace_lock);
259}
260
166#ifdef CONFIG_DYNAMIC_FTRACE 261#ifdef CONFIG_DYNAMIC_FTRACE
167#ifndef CONFIG_FTRACE_MCOUNT_RECORD 262#ifndef CONFIG_FTRACE_MCOUNT_RECORD
168# error Dynamic ftrace depends on MCOUNT_RECORD 263# error Dynamic ftrace depends on MCOUNT_RECORD
@@ -182,6 +277,8 @@ enum {
182 FTRACE_UPDATE_TRACE_FUNC = (1 << 2), 277 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
183 FTRACE_ENABLE_MCOUNT = (1 << 3), 278 FTRACE_ENABLE_MCOUNT = (1 << 3),
184 FTRACE_DISABLE_MCOUNT = (1 << 4), 279 FTRACE_DISABLE_MCOUNT = (1 << 4),
280 FTRACE_START_FUNC_RET = (1 << 5),
281 FTRACE_STOP_FUNC_RET = (1 << 6),
185}; 282};
186 283
187static int ftrace_filtered; 284static int ftrace_filtered;
@@ -308,7 +405,7 @@ ftrace_record_ip(unsigned long ip)
308{ 405{
309 struct dyn_ftrace *rec; 406 struct dyn_ftrace *rec;
310 407
311 if (!ftrace_enabled || ftrace_disabled) 408 if (ftrace_disabled)
312 return NULL; 409 return NULL;
313 410
314 rec = ftrace_alloc_dyn_node(ip); 411 rec = ftrace_alloc_dyn_node(ip);
@@ -322,14 +419,51 @@ ftrace_record_ip(unsigned long ip)
322 return rec; 419 return rec;
323} 420}
324 421
325#define FTRACE_ADDR ((long)(ftrace_caller)) 422static void print_ip_ins(const char *fmt, unsigned char *p)
423{
424 int i;
425
426 printk(KERN_CONT "%s", fmt);
427
428 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
429 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
430}
431
432static void ftrace_bug(int failed, unsigned long ip)
433{
434 switch (failed) {
435 case -EFAULT:
436 FTRACE_WARN_ON_ONCE(1);
437 pr_info("ftrace faulted on modifying ");
438 print_ip_sym(ip);
439 break;
440 case -EINVAL:
441 FTRACE_WARN_ON_ONCE(1);
442 pr_info("ftrace failed to modify ");
443 print_ip_sym(ip);
444 print_ip_ins(" actual: ", (unsigned char *)ip);
445 printk(KERN_CONT "\n");
446 break;
447 case -EPERM:
448 FTRACE_WARN_ON_ONCE(1);
449 pr_info("ftrace faulted on writing ");
450 print_ip_sym(ip);
451 break;
452 default:
453 FTRACE_WARN_ON_ONCE(1);
454 pr_info("ftrace faulted on unknown error ");
455 print_ip_sym(ip);
456 }
457}
458
326 459
327static int 460static int
328__ftrace_replace_code(struct dyn_ftrace *rec, 461__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
329 unsigned char *nop, int enable)
330{ 462{
331 unsigned long ip, fl; 463 unsigned long ip, fl;
332 unsigned char *call, *old, *new; 464 unsigned long ftrace_addr;
465
466 ftrace_addr = (unsigned long)ftrace_caller;
333 467
334 ip = rec->ip; 468 ip = rec->ip;
335 469
@@ -388,34 +522,28 @@ __ftrace_replace_code(struct dyn_ftrace *rec,
388 } 522 }
389 } 523 }
390 524
391 call = ftrace_call_replace(ip, FTRACE_ADDR); 525 if (rec->flags & FTRACE_FL_ENABLED)
392 526 return ftrace_make_call(rec, ftrace_addr);
393 if (rec->flags & FTRACE_FL_ENABLED) { 527 else
394 old = nop; 528 return ftrace_make_nop(NULL, rec, ftrace_addr);
395 new = call;
396 } else {
397 old = call;
398 new = nop;
399 }
400
401 return ftrace_modify_code(ip, old, new);
402} 529}
403 530
404static void ftrace_replace_code(int enable) 531static void ftrace_replace_code(int enable)
405{ 532{
406 int i, failed; 533 int i, failed;
407 unsigned char *nop = NULL;
408 struct dyn_ftrace *rec; 534 struct dyn_ftrace *rec;
409 struct ftrace_page *pg; 535 struct ftrace_page *pg;
410 536
411 nop = ftrace_nop_replace();
412
413 for (pg = ftrace_pages_start; pg; pg = pg->next) { 537 for (pg = ftrace_pages_start; pg; pg = pg->next) {
414 for (i = 0; i < pg->index; i++) { 538 for (i = 0; i < pg->index; i++) {
415 rec = &pg->records[i]; 539 rec = &pg->records[i];
416 540
417 /* don't modify code that has already faulted */ 541 /*
418 if (rec->flags & FTRACE_FL_FAILED) 542 * Skip over free records and records that have
543 * failed.
544 */
545 if (rec->flags & FTRACE_FL_FREE ||
546 rec->flags & FTRACE_FL_FAILED)
419 continue; 547 continue;
420 548
421 /* ignore updates to this record's mcount site */ 549 /* ignore updates to this record's mcount site */
@@ -426,68 +554,30 @@ static void ftrace_replace_code(int enable)
426 unfreeze_record(rec); 554 unfreeze_record(rec);
427 } 555 }
428 556
429 failed = __ftrace_replace_code(rec, nop, enable); 557 failed = __ftrace_replace_code(rec, enable);
430 if (failed && (rec->flags & FTRACE_FL_CONVERTED)) { 558 if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
431 rec->flags |= FTRACE_FL_FAILED; 559 rec->flags |= FTRACE_FL_FAILED;
432 if ((system_state == SYSTEM_BOOTING) || 560 if ((system_state == SYSTEM_BOOTING) ||
433 !core_kernel_text(rec->ip)) { 561 !core_kernel_text(rec->ip)) {
434 ftrace_free_rec(rec); 562 ftrace_free_rec(rec);
435 } 563 } else
564 ftrace_bug(failed, rec->ip);
436 } 565 }
437 } 566 }
438 } 567 }
439} 568}
440 569
441static void print_ip_ins(const char *fmt, unsigned char *p)
442{
443 int i;
444
445 printk(KERN_CONT "%s", fmt);
446
447 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
448 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
449}
450
451static int 570static int
452ftrace_code_disable(struct dyn_ftrace *rec) 571ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
453{ 572{
454 unsigned long ip; 573 unsigned long ip;
455 unsigned char *nop, *call;
456 int ret; 574 int ret;
457 575
458 ip = rec->ip; 576 ip = rec->ip;
459 577
460 nop = ftrace_nop_replace(); 578 ret = ftrace_make_nop(mod, rec, mcount_addr);
461 call = ftrace_call_replace(ip, mcount_addr);
462
463 ret = ftrace_modify_code(ip, call, nop);
464 if (ret) { 579 if (ret) {
465 switch (ret) { 580 ftrace_bug(ret, ip);
466 case -EFAULT:
467 FTRACE_WARN_ON_ONCE(1);
468 pr_info("ftrace faulted on modifying ");
469 print_ip_sym(ip);
470 break;
471 case -EINVAL:
472 FTRACE_WARN_ON_ONCE(1);
473 pr_info("ftrace failed to modify ");
474 print_ip_sym(ip);
475 print_ip_ins(" expected: ", call);
476 print_ip_ins(" actual: ", (unsigned char *)ip);
477 print_ip_ins(" replace: ", nop);
478 printk(KERN_CONT "\n");
479 break;
480 case -EPERM:
481 FTRACE_WARN_ON_ONCE(1);
482 pr_info("ftrace faulted on writing ");
483 print_ip_sym(ip);
484 break;
485 default:
486 FTRACE_WARN_ON_ONCE(1);
487 pr_info("ftrace faulted on unknown error ");
488 print_ip_sym(ip);
489 }
490
491 rec->flags |= FTRACE_FL_FAILED; 581 rec->flags |= FTRACE_FL_FAILED;
492 return 0; 582 return 0;
493 } 583 }
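
[Editor's note: __ftrace_replace_code() and ftrace_code_disable() now call
ftrace_make_call()/ftrace_make_nop() instead of building the byte sequences
themselves. A sketch of what the arch-side pair is expected to look like,
reusing the helpers the generic code used to call directly above; the real
implementations live in each architecture's ftrace.c (companion patches),
so treat this as illustrative.]

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned char *old, *new;

        old = ftrace_nop_replace();               /* bytes currently at the site */
        new = ftrace_call_replace(rec->ip, addr); /* call to ftrace_caller       */
        return ftrace_modify_code(rec->ip, old, new);
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned char *old, *new;

        old = ftrace_call_replace(rec->ip, addr); /* existing call (to mcount)   */
        new = ftrace_nop_replace();               /* arch-specific NOP           */
        return ftrace_modify_code(rec->ip, old, new);
}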
@@ -506,6 +596,11 @@ static int __ftrace_modify_code(void *data)
506 if (*command & FTRACE_UPDATE_TRACE_FUNC) 596 if (*command & FTRACE_UPDATE_TRACE_FUNC)
507 ftrace_update_ftrace_func(ftrace_trace_function); 597 ftrace_update_ftrace_func(ftrace_trace_function);
508 598
599 if (*command & FTRACE_START_FUNC_RET)
600 ftrace_enable_ftrace_graph_caller();
601 else if (*command & FTRACE_STOP_FUNC_RET)
602 ftrace_disable_ftrace_graph_caller();
603
509 return 0; 604 return 0;
510} 605}
511 606
@@ -515,43 +610,43 @@ static void ftrace_run_update_code(int command)
515} 610}
516 611
517static ftrace_func_t saved_ftrace_func; 612static ftrace_func_t saved_ftrace_func;
518static int ftrace_start; 613static int ftrace_start_up;
519static DEFINE_MUTEX(ftrace_start_lock);
520 614
521static void ftrace_startup(void) 615static void ftrace_startup_enable(int command)
522{ 616{
523 int command = 0;
524
525 if (unlikely(ftrace_disabled))
526 return;
527
528 mutex_lock(&ftrace_start_lock);
529 ftrace_start++;
530 command |= FTRACE_ENABLE_CALLS;
531
532 if (saved_ftrace_func != ftrace_trace_function) { 617 if (saved_ftrace_func != ftrace_trace_function) {
533 saved_ftrace_func = ftrace_trace_function; 618 saved_ftrace_func = ftrace_trace_function;
534 command |= FTRACE_UPDATE_TRACE_FUNC; 619 command |= FTRACE_UPDATE_TRACE_FUNC;
535 } 620 }
536 621
537 if (!command || !ftrace_enabled) 622 if (!command || !ftrace_enabled)
538 goto out; 623 return;
539 624
540 ftrace_run_update_code(command); 625 ftrace_run_update_code(command);
541 out:
542 mutex_unlock(&ftrace_start_lock);
543} 626}
544 627
545static void ftrace_shutdown(void) 628static void ftrace_startup(int command)
546{ 629{
547 int command = 0; 630 if (unlikely(ftrace_disabled))
631 return;
632
633 mutex_lock(&ftrace_start_lock);
634 ftrace_start_up++;
635 command |= FTRACE_ENABLE_CALLS;
548 636
637 ftrace_startup_enable(command);
638
639 mutex_unlock(&ftrace_start_lock);
640}
641
642static void ftrace_shutdown(int command)
643{
549 if (unlikely(ftrace_disabled)) 644 if (unlikely(ftrace_disabled))
550 return; 645 return;
551 646
552 mutex_lock(&ftrace_start_lock); 647 mutex_lock(&ftrace_start_lock);
553 ftrace_start--; 648 ftrace_start_up--;
554 if (!ftrace_start) 649 if (!ftrace_start_up)
555 command |= FTRACE_DISABLE_CALLS; 650 command |= FTRACE_DISABLE_CALLS;
556 651
557 if (saved_ftrace_func != ftrace_trace_function) { 652 if (saved_ftrace_func != ftrace_trace_function) {
@@ -577,8 +672,8 @@ static void ftrace_startup_sysctl(void)
577 mutex_lock(&ftrace_start_lock); 672 mutex_lock(&ftrace_start_lock);
578 /* Force update next time */ 673 /* Force update next time */
579 saved_ftrace_func = NULL; 674 saved_ftrace_func = NULL;
580 /* ftrace_start is true if we want ftrace running */ 675 /* ftrace_start_up is true if we want ftrace running */
581 if (ftrace_start) 676 if (ftrace_start_up)
582 command |= FTRACE_ENABLE_CALLS; 677 command |= FTRACE_ENABLE_CALLS;
583 678
584 ftrace_run_update_code(command); 679 ftrace_run_update_code(command);
@@ -593,8 +688,8 @@ static void ftrace_shutdown_sysctl(void)
593 return; 688 return;
594 689
595 mutex_lock(&ftrace_start_lock); 690 mutex_lock(&ftrace_start_lock);
596 /* ftrace_start is true if ftrace is running */ 691 /* ftrace_start_up is true if ftrace is running */
597 if (ftrace_start) 692 if (ftrace_start_up)
598 command |= FTRACE_DISABLE_CALLS; 693 command |= FTRACE_DISABLE_CALLS;
599 694
600 ftrace_run_update_code(command); 695 ftrace_run_update_code(command);
@@ -605,7 +700,7 @@ static cycle_t ftrace_update_time;
605static unsigned long ftrace_update_cnt; 700static unsigned long ftrace_update_cnt;
606unsigned long ftrace_update_tot_cnt; 701unsigned long ftrace_update_tot_cnt;
607 702
608static int ftrace_update_code(void) 703static int ftrace_update_code(struct module *mod)
609{ 704{
610 struct dyn_ftrace *p, *t; 705 struct dyn_ftrace *p, *t;
611 cycle_t start, stop; 706 cycle_t start, stop;
@@ -622,7 +717,7 @@ static int ftrace_update_code(void)
622 list_del_init(&p->list); 717 list_del_init(&p->list);
623 718
624 /* convert record (i.e, patch mcount-call with NOP) */ 719 /* convert record (i.e, patch mcount-call with NOP) */
625 if (ftrace_code_disable(p)) { 720 if (ftrace_code_disable(mod, p)) {
626 p->flags |= FTRACE_FL_CONVERTED; 721 p->flags |= FTRACE_FL_CONVERTED;
627 ftrace_update_cnt++; 722 ftrace_update_cnt++;
628 } else 723 } else
@@ -690,7 +785,6 @@ enum {
690#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ 785#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
691 786
692struct ftrace_iterator { 787struct ftrace_iterator {
693 loff_t pos;
694 struct ftrace_page *pg; 788 struct ftrace_page *pg;
695 unsigned idx; 789 unsigned idx;
696 unsigned flags; 790 unsigned flags;
@@ -715,6 +809,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
715 iter->pg = iter->pg->next; 809 iter->pg = iter->pg->next;
716 iter->idx = 0; 810 iter->idx = 0;
717 goto retry; 811 goto retry;
812 } else {
813 iter->idx = -1;
718 } 814 }
719 } else { 815 } else {
720 rec = &iter->pg->records[iter->idx++]; 816 rec = &iter->pg->records[iter->idx++];
@@ -737,8 +833,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
737 } 833 }
738 spin_unlock(&ftrace_lock); 834 spin_unlock(&ftrace_lock);
739 835
740 iter->pos = *pos;
741
742 return rec; 836 return rec;
743} 837}
744 838
@@ -746,13 +840,15 @@ static void *t_start(struct seq_file *m, loff_t *pos)
746{ 840{
747 struct ftrace_iterator *iter = m->private; 841 struct ftrace_iterator *iter = m->private;
748 void *p = NULL; 842 void *p = NULL;
749 loff_t l = -1;
750 843
751 if (*pos > iter->pos) 844 if (*pos > 0) {
752 *pos = iter->pos; 845 if (iter->idx < 0)
846 return p;
847 (*pos)--;
848 iter->idx--;
849 }
753 850
754 l = *pos; 851 p = t_next(m, p, pos);
755 p = t_next(m, p, &l);
756 852
757 return p; 853 return p;
758} 854}
@@ -763,21 +859,15 @@ static void t_stop(struct seq_file *m, void *p)
763 859
764static int t_show(struct seq_file *m, void *v) 860static int t_show(struct seq_file *m, void *v)
765{ 861{
766 struct ftrace_iterator *iter = m->private;
767 struct dyn_ftrace *rec = v; 862 struct dyn_ftrace *rec = v;
768 char str[KSYM_SYMBOL_LEN]; 863 char str[KSYM_SYMBOL_LEN];
769 int ret = 0;
770 864
771 if (!rec) 865 if (!rec)
772 return 0; 866 return 0;
773 867
774 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); 868 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
775 869
776 ret = seq_printf(m, "%s\n", str); 870 seq_printf(m, "%s\n", str);
777 if (ret < 0) {
778 iter->pos--;
779 iter->idx--;
780 }
781 871
782 return 0; 872 return 0;
783} 873}
@@ -803,7 +893,6 @@ ftrace_avail_open(struct inode *inode, struct file *file)
803 return -ENOMEM; 893 return -ENOMEM;
804 894
805 iter->pg = ftrace_pages_start; 895 iter->pg = ftrace_pages_start;
806 iter->pos = 0;
807 896
808 ret = seq_open(file, &show_ftrace_seq_ops); 897 ret = seq_open(file, &show_ftrace_seq_ops);
809 if (!ret) { 898 if (!ret) {
@@ -890,7 +979,6 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
890 979
891 if (file->f_mode & FMODE_READ) { 980 if (file->f_mode & FMODE_READ) {
892 iter->pg = ftrace_pages_start; 981 iter->pg = ftrace_pages_start;
893 iter->pos = 0;
894 iter->flags = enable ? FTRACE_ITER_FILTER : 982 iter->flags = enable ? FTRACE_ITER_FILTER :
895 FTRACE_ITER_NOTRACE; 983 FTRACE_ITER_NOTRACE;
896 984
@@ -959,6 +1047,13 @@ ftrace_match(unsigned char *buff, int len, int enable)
959 int type = MATCH_FULL; 1047 int type = MATCH_FULL;
960 unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; 1048 unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
961 unsigned i, match = 0, search_len = 0; 1049 unsigned i, match = 0, search_len = 0;
1050 int not = 0;
1051
1052 if (buff[0] == '!') {
1053 not = 1;
1054 buff++;
1055 len--;
1056 }
962 1057
963 for (i = 0; i < len; i++) { 1058 for (i = 0; i < len; i++) {
964 if (buff[i] == '*') { 1059 if (buff[i] == '*') {
@@ -1012,8 +1107,12 @@ ftrace_match(unsigned char *buff, int len, int enable)
1012 matched = 1; 1107 matched = 1;
1013 break; 1108 break;
1014 } 1109 }
1015 if (matched) 1110 if (matched) {
1016 rec->flags |= flag; 1111 if (not)
1112 rec->flags &= ~flag;
1113 else
1114 rec->flags |= flag;
1115 }
1017 } 1116 }
1018 pg = pg->next; 1117 pg = pg->next;
1019 } 1118 }
@@ -1181,7 +1280,7 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
1181 1280
1182 mutex_lock(&ftrace_sysctl_lock); 1281 mutex_lock(&ftrace_sysctl_lock);
1183 mutex_lock(&ftrace_start_lock); 1282 mutex_lock(&ftrace_start_lock);
1184 if (ftrace_start && ftrace_enabled) 1283 if (ftrace_start_up && ftrace_enabled)
1185 ftrace_run_update_code(FTRACE_ENABLE_CALLS); 1284 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1186 mutex_unlock(&ftrace_start_lock); 1285 mutex_unlock(&ftrace_start_lock);
1187 mutex_unlock(&ftrace_sysctl_lock); 1286 mutex_unlock(&ftrace_sysctl_lock);
@@ -1233,12 +1332,233 @@ static struct file_operations ftrace_notrace_fops = {
1233 .release = ftrace_notrace_release, 1332 .release = ftrace_notrace_release,
1234}; 1333};
1235 1334
1236static __init int ftrace_init_debugfs(void) 1335#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1336
1337static DEFINE_MUTEX(graph_lock);
1338
1339int ftrace_graph_count;
1340unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
1341
1342static void *
1343g_next(struct seq_file *m, void *v, loff_t *pos)
1237{ 1344{
1238 struct dentry *d_tracer; 1345 unsigned long *array = m->private;
1239 struct dentry *entry; 1346 int index = *pos;
1240 1347
1241 d_tracer = tracing_init_dentry(); 1348 (*pos)++;
1349
1350 if (index >= ftrace_graph_count)
1351 return NULL;
1352
1353 return &array[index];
1354}
1355
1356static void *g_start(struct seq_file *m, loff_t *pos)
1357{
1358 void *p = NULL;
1359
1360 mutex_lock(&graph_lock);
1361
1362 p = g_next(m, p, pos);
1363
1364 return p;
1365}
1366
1367static void g_stop(struct seq_file *m, void *p)
1368{
1369 mutex_unlock(&graph_lock);
1370}
1371
1372static int g_show(struct seq_file *m, void *v)
1373{
1374 unsigned long *ptr = v;
1375 char str[KSYM_SYMBOL_LEN];
1376
1377 if (!ptr)
1378 return 0;
1379
1380 kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
1381
1382 seq_printf(m, "%s\n", str);
1383
1384 return 0;
1385}
1386
1387static struct seq_operations ftrace_graph_seq_ops = {
1388 .start = g_start,
1389 .next = g_next,
1390 .stop = g_stop,
1391 .show = g_show,
1392};
1393
1394static int
1395ftrace_graph_open(struct inode *inode, struct file *file)
1396{
1397 int ret = 0;
1398
1399 if (unlikely(ftrace_disabled))
1400 return -ENODEV;
1401
1402 mutex_lock(&graph_lock);
1403 if ((file->f_mode & FMODE_WRITE) &&
1404 !(file->f_flags & O_APPEND)) {
1405 ftrace_graph_count = 0;
1406 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
1407 }
1408
1409 if (file->f_mode & FMODE_READ) {
1410 ret = seq_open(file, &ftrace_graph_seq_ops);
1411 if (!ret) {
1412 struct seq_file *m = file->private_data;
1413 m->private = ftrace_graph_funcs;
1414 }
1415 } else
1416 file->private_data = ftrace_graph_funcs;
1417 mutex_unlock(&graph_lock);
1418
1419 return ret;
1420}
1421
1422static ssize_t
1423ftrace_graph_read(struct file *file, char __user *ubuf,
1424 size_t cnt, loff_t *ppos)
1425{
1426 if (file->f_mode & FMODE_READ)
1427 return seq_read(file, ubuf, cnt, ppos);
1428 else
1429 return -EPERM;
1430}
1431
1432static int
1433ftrace_set_func(unsigned long *array, int idx, char *buffer)
1434{
1435 char str[KSYM_SYMBOL_LEN];
1436 struct dyn_ftrace *rec;
1437 struct ftrace_page *pg;
1438 int found = 0;
1439 int i, j;
1440
1441 if (ftrace_disabled)
1442 return -ENODEV;
1443
1444 /* should not be called from interrupt context */
1445 spin_lock(&ftrace_lock);
1446
1447 for (pg = ftrace_pages_start; pg; pg = pg->next) {
1448 for (i = 0; i < pg->index; i++) {
1449 rec = &pg->records[i];
1450
1451 if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
1452 continue;
1453
1454 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1455 if (strcmp(str, buffer) == 0) {
1456 found = 1;
1457 for (j = 0; j < idx; j++)
1458 if (array[j] == rec->ip) {
1459 found = 0;
1460 break;
1461 }
1462 if (found)
1463 array[idx] = rec->ip;
1464 break;
1465 }
1466 }
1467 }
1468 spin_unlock(&ftrace_lock);
1469
1470 return found ? 0 : -EINVAL;
1471}
1472
1473static ssize_t
1474ftrace_graph_write(struct file *file, const char __user *ubuf,
1475 size_t cnt, loff_t *ppos)
1476{
1477 unsigned char buffer[FTRACE_BUFF_MAX+1];
1478 unsigned long *array;
1479 size_t read = 0;
1480 ssize_t ret;
1481 int index = 0;
1482 char ch;
1483
1484 if (!cnt || cnt < 0)
1485 return 0;
1486
1487 mutex_lock(&graph_lock);
1488
1489 if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
1490 ret = -EBUSY;
1491 goto out;
1492 }
1493
1494 if (file->f_mode & FMODE_READ) {
1495 struct seq_file *m = file->private_data;
1496 array = m->private;
1497 } else
1498 array = file->private_data;
1499
1500 ret = get_user(ch, ubuf++);
1501 if (ret)
1502 goto out;
1503 read++;
1504 cnt--;
1505
1506 /* skip white space */
1507 while (cnt && isspace(ch)) {
1508 ret = get_user(ch, ubuf++);
1509 if (ret)
1510 goto out;
1511 read++;
1512 cnt--;
1513 }
1514
1515 if (isspace(ch)) {
1516 *ppos += read;
1517 ret = read;
1518 goto out;
1519 }
1520
1521 while (cnt && !isspace(ch)) {
1522 if (index < FTRACE_BUFF_MAX)
1523 buffer[index++] = ch;
1524 else {
1525 ret = -EINVAL;
1526 goto out;
1527 }
1528 ret = get_user(ch, ubuf++);
1529 if (ret)
1530 goto out;
1531 read++;
1532 cnt--;
1533 }
1534 buffer[index] = 0;
1535
1536 /* we allow only one at a time */
1537 ret = ftrace_set_func(array, ftrace_graph_count, buffer);
1538 if (ret)
1539 goto out;
1540
1541 ftrace_graph_count++;
1542
1543 file->f_pos += read;
1544
1545 ret = read;
1546 out:
1547 mutex_unlock(&graph_lock);
1548
1549 return ret;
1550}
1551
1552static const struct file_operations ftrace_graph_fops = {
1553 .open = ftrace_graph_open,
1554 .read = ftrace_graph_read,
1555 .write = ftrace_graph_write,
1556};
1557#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1558
1559static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
1560{
1561 struct dentry *entry;
1242 1562
1243 entry = debugfs_create_file("available_filter_functions", 0444, 1563 entry = debugfs_create_file("available_filter_functions", 0444,
1244 d_tracer, NULL, &ftrace_avail_fops); 1564 d_tracer, NULL, &ftrace_avail_fops);
@@ -1263,12 +1583,20 @@ static __init int ftrace_init_debugfs(void)
1263 pr_warning("Could not create debugfs " 1583 pr_warning("Could not create debugfs "
1264 "'set_ftrace_notrace' entry\n"); 1584 "'set_ftrace_notrace' entry\n");
1265 1585
1586#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1587 entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
1588 NULL,
1589 &ftrace_graph_fops);
1590 if (!entry)
1591 pr_warning("Could not create debugfs "
1592 "'set_graph_function' entry\n");
1593#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1594
1266 return 0; 1595 return 0;
1267} 1596}
1268 1597
1269fs_initcall(ftrace_init_debugfs); 1598static int ftrace_convert_nops(struct module *mod,
1270 1599 unsigned long *start,
1271static int ftrace_convert_nops(unsigned long *start,
1272 unsigned long *end) 1600 unsigned long *end)
1273{ 1601{
1274 unsigned long *p; 1602 unsigned long *p;
@@ -1279,23 +1607,32 @@ static int ftrace_convert_nops(unsigned long *start,
1279 p = start; 1607 p = start;
1280 while (p < end) { 1608 while (p < end) {
1281 addr = ftrace_call_adjust(*p++); 1609 addr = ftrace_call_adjust(*p++);
1610 /*
1611 * Some architecture linkers will pad between
1612 * the different mcount_loc sections of different
1613 * object files to satisfy alignments.
1614 * Skip any NULL pointers.
1615 */
1616 if (!addr)
1617 continue;
1282 ftrace_record_ip(addr); 1618 ftrace_record_ip(addr);
1283 } 1619 }
1284 1620
1285 /* disable interrupts to prevent kstop machine */ 1621 /* disable interrupts to prevent kstop machine */
1286 local_irq_save(flags); 1622 local_irq_save(flags);
1287 ftrace_update_code(); 1623 ftrace_update_code(mod);
1288 local_irq_restore(flags); 1624 local_irq_restore(flags);
1289 mutex_unlock(&ftrace_start_lock); 1625 mutex_unlock(&ftrace_start_lock);
1290 1626
1291 return 0; 1627 return 0;
1292} 1628}
1293 1629
1294void ftrace_init_module(unsigned long *start, unsigned long *end) 1630void ftrace_init_module(struct module *mod,
1631 unsigned long *start, unsigned long *end)
1295{ 1632{
1296 if (ftrace_disabled || start == end) 1633 if (ftrace_disabled || start == end)
1297 return; 1634 return;
1298 ftrace_convert_nops(start, end); 1635 ftrace_convert_nops(mod, start, end);
1299} 1636}
1300 1637
1301extern unsigned long __start_mcount_loc[]; 1638extern unsigned long __start_mcount_loc[];
@@ -1325,7 +1662,8 @@ void __init ftrace_init(void)
1325 1662
1326 last_ftrace_enabled = ftrace_enabled = 1; 1663 last_ftrace_enabled = ftrace_enabled = 1;
1327 1664
1328 ret = ftrace_convert_nops(__start_mcount_loc, 1665 ret = ftrace_convert_nops(NULL,
1666 __start_mcount_loc,
1329 __stop_mcount_loc); 1667 __stop_mcount_loc);
1330 1668
1331 return; 1669 return;
@@ -1342,12 +1680,186 @@ static int __init ftrace_nodyn_init(void)
1342} 1680}
1343device_initcall(ftrace_nodyn_init); 1681device_initcall(ftrace_nodyn_init);
1344 1682
1345# define ftrace_startup() do { } while (0) 1683static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
1346# define ftrace_shutdown() do { } while (0) 1684static inline void ftrace_startup_enable(int command) { }
1685/* Keep as macros so we do not need to define the commands */
1686# define ftrace_startup(command) do { } while (0)
1687# define ftrace_shutdown(command) do { } while (0)
1347# define ftrace_startup_sysctl() do { } while (0) 1688# define ftrace_startup_sysctl() do { } while (0)
1348# define ftrace_shutdown_sysctl() do { } while (0) 1689# define ftrace_shutdown_sysctl() do { } while (0)
1349#endif /* CONFIG_DYNAMIC_FTRACE */ 1690#endif /* CONFIG_DYNAMIC_FTRACE */
1350 1691
1692static ssize_t
1693ftrace_pid_read(struct file *file, char __user *ubuf,
1694 size_t cnt, loff_t *ppos)
1695{
1696 char buf[64];
1697 int r;
1698
1699 if (ftrace_pid_trace == ftrace_swapper_pid)
1700 r = sprintf(buf, "swapper tasks\n");
1701 else if (ftrace_pid_trace)
1702 r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
1703 else
1704 r = sprintf(buf, "no pid\n");
1705
1706 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1707}
1708
1709static void clear_ftrace_swapper(void)
1710{
1711 struct task_struct *p;
1712 int cpu;
1713
1714 get_online_cpus();
1715 for_each_online_cpu(cpu) {
1716 p = idle_task(cpu);
1717 clear_tsk_trace_trace(p);
1718 }
1719 put_online_cpus();
1720}
1721
1722static void set_ftrace_swapper(void)
1723{
1724 struct task_struct *p;
1725 int cpu;
1726
1727 get_online_cpus();
1728 for_each_online_cpu(cpu) {
1729 p = idle_task(cpu);
1730 set_tsk_trace_trace(p);
1731 }
1732 put_online_cpus();
1733}
1734
1735static void clear_ftrace_pid(struct pid *pid)
1736{
1737 struct task_struct *p;
1738
1739 do_each_pid_task(pid, PIDTYPE_PID, p) {
1740 clear_tsk_trace_trace(p);
1741 } while_each_pid_task(pid, PIDTYPE_PID, p);
1742 put_pid(pid);
1743}
1744
1745static void set_ftrace_pid(struct pid *pid)
1746{
1747 struct task_struct *p;
1748
1749 do_each_pid_task(pid, PIDTYPE_PID, p) {
1750 set_tsk_trace_trace(p);
1751 } while_each_pid_task(pid, PIDTYPE_PID, p);
1752}
1753
1754static void clear_ftrace_pid_task(struct pid **pid)
1755{
1756 if (*pid == ftrace_swapper_pid)
1757 clear_ftrace_swapper();
1758 else
1759 clear_ftrace_pid(*pid);
1760
1761 *pid = NULL;
1762}
1763
1764static void set_ftrace_pid_task(struct pid *pid)
1765{
1766 if (pid == ftrace_swapper_pid)
1767 set_ftrace_swapper();
1768 else
1769 set_ftrace_pid(pid);
1770}
1771
1772static ssize_t
1773ftrace_pid_write(struct file *filp, const char __user *ubuf,
1774 size_t cnt, loff_t *ppos)
1775{
1776 struct pid *pid;
1777 char buf[64];
1778 long val;
1779 int ret;
1780
1781 if (cnt >= sizeof(buf))
1782 return -EINVAL;
1783
1784 if (copy_from_user(&buf, ubuf, cnt))
1785 return -EFAULT;
1786
1787 buf[cnt] = 0;
1788
1789 ret = strict_strtol(buf, 10, &val);
1790 if (ret < 0)
1791 return ret;
1792
1793 mutex_lock(&ftrace_start_lock);
1794 if (val < 0) {
1795 /* disable pid tracing */
1796 if (!ftrace_pid_trace)
1797 goto out;
1798
1799 clear_ftrace_pid_task(&ftrace_pid_trace);
1800
1801 } else {
1802 /* swapper task is special */
1803 if (!val) {
1804 pid = ftrace_swapper_pid;
1805 if (pid == ftrace_pid_trace)
1806 goto out;
1807 } else {
1808 pid = find_get_pid(val);
1809
1810 if (pid == ftrace_pid_trace) {
1811 put_pid(pid);
1812 goto out;
1813 }
1814 }
1815
1816 if (ftrace_pid_trace)
1817 clear_ftrace_pid_task(&ftrace_pid_trace);
1818
1819 if (!pid)
1820 goto out;
1821
1822 ftrace_pid_trace = pid;
1823
1824 set_ftrace_pid_task(ftrace_pid_trace);
1825 }
1826
1827 /* update the function call */
1828 ftrace_update_pid_func();
1829 ftrace_startup_enable(0);
1830
1831 out:
1832 mutex_unlock(&ftrace_start_lock);
1833
1834 return cnt;
1835}
1836
1837static struct file_operations ftrace_pid_fops = {
1838 .read = ftrace_pid_read,
1839 .write = ftrace_pid_write,
1840};
1841
1842static __init int ftrace_init_debugfs(void)
1843{
1844 struct dentry *d_tracer;
1845 struct dentry *entry;
1846
1847 d_tracer = tracing_init_dentry();
1848 if (!d_tracer)
1849 return 0;
1850
1851 ftrace_init_dyn_debugfs(d_tracer);
1852
1853 entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
1854 NULL, &ftrace_pid_fops);
1855 if (!entry)
1856 pr_warning("Could not create debugfs "
1857 "'set_ftrace_pid' entry\n");
1858 return 0;
1859}
1860
1861fs_initcall(ftrace_init_debugfs);
1862
1351/** 1863/**
1352 * ftrace_kill - kill ftrace 1864 * ftrace_kill - kill ftrace
1353 * 1865 *
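
[Editor's note: the per-task flag helpers used by the pid-filter code above
(set_tsk_trace_trace() and friends) are not defined in this file. A minimal
sketch of their assumed form; the real helpers are added to
include/linux/sched.h elsewhere in this series.]

#define TSK_TRACE_FL_TRACE_BIT  0
#define TSK_TRACE_FL_TRACE      (1 << TSK_TRACE_FL_TRACE_BIT)

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
        set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
        clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
        return tsk->trace & TSK_TRACE_FL_TRACE;
}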
@@ -1381,10 +1893,11 @@ int register_ftrace_function(struct ftrace_ops *ops)
1381 return -1; 1893 return -1;
1382 1894
1383 mutex_lock(&ftrace_sysctl_lock); 1895 mutex_lock(&ftrace_sysctl_lock);
1896
1384 ret = __register_ftrace_function(ops); 1897 ret = __register_ftrace_function(ops);
1385 ftrace_startup(); 1898 ftrace_startup(0);
1386 mutex_unlock(&ftrace_sysctl_lock);
1387 1899
1900 mutex_unlock(&ftrace_sysctl_lock);
1388 return ret; 1901 return ret;
1389} 1902}
1390 1903
@@ -1400,7 +1913,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
1400 1913
1401 mutex_lock(&ftrace_sysctl_lock); 1914 mutex_lock(&ftrace_sysctl_lock);
1402 ret = __unregister_ftrace_function(ops); 1915 ret = __unregister_ftrace_function(ops);
1403 ftrace_shutdown(); 1916 ftrace_shutdown(0);
1404 mutex_unlock(&ftrace_sysctl_lock); 1917 mutex_unlock(&ftrace_sysctl_lock);
1405 1918
1406 return ret; 1919 return ret;
@@ -1449,3 +1962,153 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
1449 return ret; 1962 return ret;
1450} 1963}
1451 1964
1965#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1966
1967static atomic_t ftrace_graph_active;
1968
1969int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
1970{
1971 return 0;
1972}
1973
1974/* The callbacks that hook a function */
1975trace_func_graph_ret_t ftrace_graph_return =
1976 (trace_func_graph_ret_t)ftrace_stub;
1977trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
1978
1979/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
1980static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
1981{
1982 int i;
1983 int ret = 0;
1984 unsigned long flags;
1985 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
1986 struct task_struct *g, *t;
1987
1988 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
1989 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
1990 * sizeof(struct ftrace_ret_stack),
1991 GFP_KERNEL);
1992 if (!ret_stack_list[i]) {
1993 start = 0;
1994 end = i;
1995 ret = -ENOMEM;
1996 goto free;
1997 }
1998 }
1999
2000 read_lock_irqsave(&tasklist_lock, flags);
2001 do_each_thread(g, t) {
2002 if (start == end) {
2003 ret = -EAGAIN;
2004 goto unlock;
2005 }
2006
2007 if (t->ret_stack == NULL) {
2008 t->curr_ret_stack = -1;
2009 /* Make sure IRQs see the -1 first: */
2010 barrier();
2011 t->ret_stack = ret_stack_list[start++];
2012 atomic_set(&t->tracing_graph_pause, 0);
2013 atomic_set(&t->trace_overrun, 0);
2014 }
2015 } while_each_thread(g, t);
2016
2017unlock:
2018 read_unlock_irqrestore(&tasklist_lock, flags);
2019free:
2020 for (i = start; i < end; i++)
2021 kfree(ret_stack_list[i]);
2022 return ret;
2023}
2024
2025/* Allocate a return stack for each task */
2026static int start_graph_tracing(void)
2027{
2028 struct ftrace_ret_stack **ret_stack_list;
2029 int ret;
2030
2031 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
2032 sizeof(struct ftrace_ret_stack *),
2033 GFP_KERNEL);
2034
2035 if (!ret_stack_list)
2036 return -ENOMEM;
2037
2038 do {
2039 ret = alloc_retstack_tasklist(ret_stack_list);
2040 } while (ret == -EAGAIN);
2041
2042 kfree(ret_stack_list);
2043 return ret;
2044}
2045
2046int register_ftrace_graph(trace_func_graph_ret_t retfunc,
2047 trace_func_graph_ent_t entryfunc)
2048{
2049 int ret = 0;
2050
2051 mutex_lock(&ftrace_sysctl_lock);
2052
2053 atomic_inc(&ftrace_graph_active);
2054 ret = start_graph_tracing();
2055 if (ret) {
2056 atomic_dec(&ftrace_graph_active);
2057 goto out;
2058 }
2059
2060 ftrace_graph_return = retfunc;
2061 ftrace_graph_entry = entryfunc;
2062
2063 ftrace_startup(FTRACE_START_FUNC_RET);
2064
2065out:
2066 mutex_unlock(&ftrace_sysctl_lock);
2067 return ret;
2068}
2069
2070void unregister_ftrace_graph(void)
2071{
2072 mutex_lock(&ftrace_sysctl_lock);
2073
2074 atomic_dec(&ftrace_graph_active);
2075 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
2076 ftrace_graph_entry = ftrace_graph_entry_stub;
2077 ftrace_shutdown(FTRACE_STOP_FUNC_RET);
2078
2079 mutex_unlock(&ftrace_sysctl_lock);
2080}
2081
2082/* Allocate a return stack for newly created task */
2083void ftrace_graph_init_task(struct task_struct *t)
2084{
2085 if (atomic_read(&ftrace_graph_active)) {
2086 t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
2087 * sizeof(struct ftrace_ret_stack),
2088 GFP_KERNEL);
2089 if (!t->ret_stack)
2090 return;
2091 t->curr_ret_stack = -1;
2092 atomic_set(&t->tracing_graph_pause, 0);
2093 atomic_set(&t->trace_overrun, 0);
2094 } else
2095 t->ret_stack = NULL;
2096}
2097
2098void ftrace_graph_exit_task(struct task_struct *t)
2099{
2100 struct ftrace_ret_stack *ret_stack = t->ret_stack;
2101
2102 t->ret_stack = NULL;
2103 /* NULL must become visible to IRQs before we free it: */
2104 barrier();
2105
2106 kfree(ret_stack);
2107}
2108
2109void ftrace_graph_stop(void)
2110{
2111 ftrace_stop();
2112}
2113#endif
2114
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 668bbb5ef2bd..76f34c0ef29c 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -18,8 +18,46 @@
18 18
19#include "trace.h" 19#include "trace.h"
20 20
21/* Global flag to disable all recording to ring buffers */ 21/*
22static int ring_buffers_off __read_mostly; 22 * A fast way to enable or disable all ring buffers is to
23 * call tracing_on or tracing_off. Turning off the ring buffers
24 * prevents all ring buffers from being recorded to.
 25 * Turning this switch on makes it OK to write to the
26 * ring buffer, if the ring buffer is enabled itself.
27 *
 28 * There are three layers that must be on in order to write
29 * to the ring buffer.
30 *
31 * 1) This global flag must be set.
32 * 2) The ring buffer must be enabled for recording.
33 * 3) The per cpu buffer must be enabled for recording.
34 *
35 * In case of an anomaly, this global flag has a bit set that
 36 * will permanently disable all ring buffers.
37 */
38
39/*
40 * Global flag to disable all recording to ring buffers
41 * This has two bits: ON, DISABLED
42 *
43 * ON DISABLED
44 * ---- ----------
45 * 0 0 : ring buffers are off
46 * 1 0 : ring buffers are on
47 * X 1 : ring buffers are permanently disabled
48 */
49
50enum {
51 RB_BUFFERS_ON_BIT = 0,
52 RB_BUFFERS_DISABLED_BIT = 1,
53};
54
55enum {
56 RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT,
57 RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
58};
59
60static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
23 61
24/** 62/**
25 * tracing_on - enable all tracing buffers 63 * tracing_on - enable all tracing buffers
@@ -29,7 +67,7 @@ static int ring_buffers_off __read_mostly;
29 */ 67 */
30void tracing_on(void) 68void tracing_on(void)
31{ 69{
32 ring_buffers_off = 0; 70 set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
33} 71}
34 72
35/** 73/**
@@ -42,9 +80,22 @@ void tracing_on(void)
42 */ 80 */
43void tracing_off(void) 81void tracing_off(void)
44{ 82{
45 ring_buffers_off = 1; 83 clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
46} 84}
47 85
86/**
87 * tracing_off_permanent - permanently disable ring buffers
88 *
89 * This function, once called, will disable all ring buffers
 90 * permanently.
91 */
92void tracing_off_permanent(void)
93{
94 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
95}
96
97#include "trace.h"
98
48/* Up this if you want to test the TIME_EXTENTS and normalization */ 99/* Up this if you want to test the TIME_EXTENTS and normalization */
49#define DEBUG_SHIFT 0 100#define DEBUG_SHIFT 0
50 101
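
[Editor's note: a condensed sketch of the three-layer gate described in the
comment above. The real checks are spread through ring_buffer_lock_reserve()
and ring_buffer_write() later in this file; this helper is illustrative only.]

static int rb_write_allowed(struct ring_buffer *buffer,
                            struct ring_buffer_per_cpu *cpu_buffer)
{
        if (ring_buffer_flags != RB_BUFFERS_ON)          /* 1) global switch on, not DISABLED */
                return 0;
        if (atomic_read(&buffer->record_disabled))       /* 2) this ring buffer enabled       */
                return 0;
        if (atomic_read(&cpu_buffer->record_disabled))   /* 3) this per-cpu buffer enabled    */
                return 0;
        return 1;
}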
@@ -56,7 +107,7 @@ u64 ring_buffer_time_stamp(int cpu)
56 preempt_disable_notrace(); 107 preempt_disable_notrace();
57 /* shift to debug/test normalization and TIME_EXTENTS */ 108 /* shift to debug/test normalization and TIME_EXTENTS */
58 time = sched_clock() << DEBUG_SHIFT; 109 time = sched_clock() << DEBUG_SHIFT;
59 preempt_enable_notrace(); 110 preempt_enable_no_resched_notrace();
60 111
61 return time; 112 return time;
62} 113}
@@ -144,20 +195,24 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
144#define TS_MASK ((1ULL << TS_SHIFT) - 1) 195#define TS_MASK ((1ULL << TS_SHIFT) - 1)
145#define TS_DELTA_TEST (~TS_MASK) 196#define TS_DELTA_TEST (~TS_MASK)
146 197
147/* 198struct buffer_data_page {
148 * This hack stolen from mm/slob.c.
149 * We can store per page timing information in the page frame of the page.
150 * Thanks to Peter Zijlstra for suggesting this idea.
151 */
152struct buffer_page {
153 u64 time_stamp; /* page time stamp */ 199 u64 time_stamp; /* page time stamp */
154 local_t write; /* index for next write */
155 local_t commit; /* write committed index */ 200 local_t commit; /* write committed index */
201 unsigned char data[]; /* data of buffer page */
202};
203
204struct buffer_page {
205 local_t write; /* index for next write */
156 unsigned read; /* index for next read */ 206 unsigned read; /* index for next read */
157 struct list_head list; /* list of free pages */ 207 struct list_head list; /* list of free pages */
158 void *page; /* Actual data page */ 208 struct buffer_data_page *page; /* Actual data page */
159}; 209};
160 210
211static void rb_init_page(struct buffer_data_page *bpage)
212{
213 local_set(&bpage->commit, 0);
214}
215
161/* 216/*
162 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing 217 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
163 * this issue out. 218 * this issue out.
@@ -179,7 +234,7 @@ static inline int test_time_stamp(u64 delta)
179 return 0; 234 return 0;
180} 235}
181 236
182#define BUF_PAGE_SIZE PAGE_SIZE 237#define BUF_PAGE_SIZE (PAGE_SIZE - sizeof(struct buffer_data_page))
183 238
184/* 239/*
185 * head_page == tail_page && head == tail then buffer is empty. 240 * head_page == tail_page && head == tail then buffer is empty.
@@ -187,7 +242,8 @@ static inline int test_time_stamp(u64 delta)
187struct ring_buffer_per_cpu { 242struct ring_buffer_per_cpu {
188 int cpu; 243 int cpu;
189 struct ring_buffer *buffer; 244 struct ring_buffer *buffer;
190 spinlock_t lock; 245 spinlock_t reader_lock; /* serialize readers */
246 raw_spinlock_t lock;
191 struct lock_class_key lock_key; 247 struct lock_class_key lock_key;
192 struct list_head pages; 248 struct list_head pages;
193 struct buffer_page *head_page; /* read from head */ 249 struct buffer_page *head_page; /* read from head */
@@ -202,7 +258,6 @@ struct ring_buffer_per_cpu {
202}; 258};
203 259
204struct ring_buffer { 260struct ring_buffer {
205 unsigned long size;
206 unsigned pages; 261 unsigned pages;
207 unsigned flags; 262 unsigned flags;
208 int cpus; 263 int cpus;
@@ -221,32 +276,16 @@ struct ring_buffer_iter {
221 u64 read_stamp; 276 u64 read_stamp;
222}; 277};
223 278
279/* buffer may be either ring_buffer or ring_buffer_per_cpu */
224#define RB_WARN_ON(buffer, cond) \ 280#define RB_WARN_ON(buffer, cond) \
225 do { \ 281 ({ \
226 if (unlikely(cond)) { \ 282 int _____ret = unlikely(cond); \
227 atomic_inc(&buffer->record_disabled); \ 283 if (_____ret) { \
228 WARN_ON(1); \
229 } \
230 } while (0)
231
232#define RB_WARN_ON_RET(buffer, cond) \
233 do { \
234 if (unlikely(cond)) { \
235 atomic_inc(&buffer->record_disabled); \
236 WARN_ON(1); \
237 return -1; \
238 } \
239 } while (0)
240
241#define RB_WARN_ON_ONCE(buffer, cond) \
242 do { \
243 static int once; \
244 if (unlikely(cond) && !once) { \
245 once++; \
246 atomic_inc(&buffer->record_disabled); \ 284 atomic_inc(&buffer->record_disabled); \
247 WARN_ON(1); \ 285 WARN_ON(1); \
248 } \ 286 } \
249 } while (0) 287 _____ret; \
288 })
250 289
251/** 290/**
252 * check_pages - integrity check of buffer pages 291 * check_pages - integrity check of buffer pages
@@ -258,16 +297,20 @@ struct ring_buffer_iter {
258static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) 297static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
259{ 298{
260 struct list_head *head = &cpu_buffer->pages; 299 struct list_head *head = &cpu_buffer->pages;
261 struct buffer_page *page, *tmp; 300 struct buffer_page *bpage, *tmp;
262 301
263 RB_WARN_ON_RET(cpu_buffer, head->next->prev != head); 302 if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
264 RB_WARN_ON_RET(cpu_buffer, head->prev->next != head); 303 return -1;
304 if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
305 return -1;
265 306
266 list_for_each_entry_safe(page, tmp, head, list) { 307 list_for_each_entry_safe(bpage, tmp, head, list) {
267 RB_WARN_ON_RET(cpu_buffer, 308 if (RB_WARN_ON(cpu_buffer,
268 page->list.next->prev != &page->list); 309 bpage->list.next->prev != &bpage->list))
269 RB_WARN_ON_RET(cpu_buffer, 310 return -1;
270 page->list.prev->next != &page->list); 311 if (RB_WARN_ON(cpu_buffer,
312 bpage->list.prev->next != &bpage->list))
313 return -1;
271 } 314 }
272 315
273 return 0; 316 return 0;
@@ -277,22 +320,23 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
277 unsigned nr_pages) 320 unsigned nr_pages)
278{ 321{
279 struct list_head *head = &cpu_buffer->pages; 322 struct list_head *head = &cpu_buffer->pages;
280 struct buffer_page *page, *tmp; 323 struct buffer_page *bpage, *tmp;
281 unsigned long addr; 324 unsigned long addr;
282 LIST_HEAD(pages); 325 LIST_HEAD(pages);
283 unsigned i; 326 unsigned i;
284 327
285 for (i = 0; i < nr_pages; i++) { 328 for (i = 0; i < nr_pages; i++) {
286 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()), 329 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
287 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu)); 330 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
288 if (!page) 331 if (!bpage)
289 goto free_pages; 332 goto free_pages;
290 list_add(&page->list, &pages); 333 list_add(&bpage->list, &pages);
291 334
292 addr = __get_free_page(GFP_KERNEL); 335 addr = __get_free_page(GFP_KERNEL);
293 if (!addr) 336 if (!addr)
294 goto free_pages; 337 goto free_pages;
295 page->page = (void *)addr; 338 bpage->page = (void *)addr;
339 rb_init_page(bpage->page);
296 } 340 }
297 341
298 list_splice(&pages, head); 342 list_splice(&pages, head);
@@ -302,9 +346,9 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
302 return 0; 346 return 0;
303 347
304 free_pages: 348 free_pages:
305 list_for_each_entry_safe(page, tmp, &pages, list) { 349 list_for_each_entry_safe(bpage, tmp, &pages, list) {
306 list_del_init(&page->list); 350 list_del_init(&bpage->list);
307 free_buffer_page(page); 351 free_buffer_page(bpage);
308 } 352 }
309 return -ENOMEM; 353 return -ENOMEM;
310} 354}
@@ -313,7 +357,7 @@ static struct ring_buffer_per_cpu *
313rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) 357rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
314{ 358{
315 struct ring_buffer_per_cpu *cpu_buffer; 359 struct ring_buffer_per_cpu *cpu_buffer;
316 struct buffer_page *page; 360 struct buffer_page *bpage;
317 unsigned long addr; 361 unsigned long addr;
318 int ret; 362 int ret;
319 363
@@ -324,19 +368,21 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
324 368
325 cpu_buffer->cpu = cpu; 369 cpu_buffer->cpu = cpu;
326 cpu_buffer->buffer = buffer; 370 cpu_buffer->buffer = buffer;
327 spin_lock_init(&cpu_buffer->lock); 371 spin_lock_init(&cpu_buffer->reader_lock);
372 cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
328 INIT_LIST_HEAD(&cpu_buffer->pages); 373 INIT_LIST_HEAD(&cpu_buffer->pages);
329 374
330 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()), 375 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
331 GFP_KERNEL, cpu_to_node(cpu)); 376 GFP_KERNEL, cpu_to_node(cpu));
332 if (!page) 377 if (!bpage)
333 goto fail_free_buffer; 378 goto fail_free_buffer;
334 379
335 cpu_buffer->reader_page = page; 380 cpu_buffer->reader_page = bpage;
336 addr = __get_free_page(GFP_KERNEL); 381 addr = __get_free_page(GFP_KERNEL);
337 if (!addr) 382 if (!addr)
338 goto fail_free_reader; 383 goto fail_free_reader;
339 page->page = (void *)addr; 384 bpage->page = (void *)addr;
385 rb_init_page(bpage->page);
340 386
341 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 387 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
342 388
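This hunk splits the per-cpu locking in two: readers (and resets) serialize on the new reader_lock spinlock, while the raw spinlock `lock` is taken by the writer only when it has to move pages, so it can be held with interrupts disabled and without tracing recursion. A sketch of the fields the change implies (the full struct sits earlier in ring_buffer.c and is assumed here):

struct ring_buffer_per_cpu {
	int			cpu;
	struct ring_buffer	*buffer;
	spinlock_t		reader_lock;	/* serialize readers */
	raw_spinlock_t		lock;		/* writer-side page moves */
	struct list_head	pages;
	struct buffer_page	*reader_page;
	/* ... head/tail/commit pages, counters, write_stamp ... */
};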
@@ -361,14 +407,14 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
361static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) 407static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
362{ 408{
363 struct list_head *head = &cpu_buffer->pages; 409 struct list_head *head = &cpu_buffer->pages;
364 struct buffer_page *page, *tmp; 410 struct buffer_page *bpage, *tmp;
365 411
366 list_del_init(&cpu_buffer->reader_page->list); 412 list_del_init(&cpu_buffer->reader_page->list);
367 free_buffer_page(cpu_buffer->reader_page); 413 free_buffer_page(cpu_buffer->reader_page);
368 414
369 list_for_each_entry_safe(page, tmp, head, list) { 415 list_for_each_entry_safe(bpage, tmp, head, list) {
370 list_del_init(&page->list); 416 list_del_init(&bpage->list);
371 free_buffer_page(page); 417 free_buffer_page(bpage);
372 } 418 }
373 kfree(cpu_buffer); 419 kfree(cpu_buffer);
374} 420}
@@ -465,7 +511,7 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
465static void 511static void
466rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) 512rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
467{ 513{
468 struct buffer_page *page; 514 struct buffer_page *bpage;
469 struct list_head *p; 515 struct list_head *p;
470 unsigned i; 516 unsigned i;
471 517
@@ -473,13 +519,15 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
473 synchronize_sched(); 519 synchronize_sched();
474 520
475 for (i = 0; i < nr_pages; i++) { 521 for (i = 0; i < nr_pages; i++) {
476 BUG_ON(list_empty(&cpu_buffer->pages)); 522 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
523 return;
477 p = cpu_buffer->pages.next; 524 p = cpu_buffer->pages.next;
478 page = list_entry(p, struct buffer_page, list); 525 bpage = list_entry(p, struct buffer_page, list);
479 list_del_init(&page->list); 526 list_del_init(&bpage->list);
480 free_buffer_page(page); 527 free_buffer_page(bpage);
481 } 528 }
482 BUG_ON(list_empty(&cpu_buffer->pages)); 529 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
530 return;
483 531
484 rb_reset_cpu(cpu_buffer); 532 rb_reset_cpu(cpu_buffer);
485 533
@@ -493,7 +541,7 @@ static void
493rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer, 541rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
494 struct list_head *pages, unsigned nr_pages) 542 struct list_head *pages, unsigned nr_pages)
495{ 543{
496 struct buffer_page *page; 544 struct buffer_page *bpage;
497 struct list_head *p; 545 struct list_head *p;
498 unsigned i; 546 unsigned i;
499 547
@@ -501,11 +549,12 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
501 synchronize_sched(); 549 synchronize_sched();
502 550
503 for (i = 0; i < nr_pages; i++) { 551 for (i = 0; i < nr_pages; i++) {
504 BUG_ON(list_empty(pages)); 552 if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
553 return;
505 p = pages->next; 554 p = pages->next;
506 page = list_entry(p, struct buffer_page, list); 555 bpage = list_entry(p, struct buffer_page, list);
507 list_del_init(&page->list); 556 list_del_init(&bpage->list);
508 list_add_tail(&page->list, &cpu_buffer->pages); 557 list_add_tail(&bpage->list, &cpu_buffer->pages);
509 } 558 }
510 rb_reset_cpu(cpu_buffer); 559 rb_reset_cpu(cpu_buffer);
511 560
@@ -532,7 +581,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
532{ 581{
533 struct ring_buffer_per_cpu *cpu_buffer; 582 struct ring_buffer_per_cpu *cpu_buffer;
534 unsigned nr_pages, rm_pages, new_pages; 583 unsigned nr_pages, rm_pages, new_pages;
535 struct buffer_page *page, *tmp; 584 struct buffer_page *bpage, *tmp;
536 unsigned long buffer_size; 585 unsigned long buffer_size;
537 unsigned long addr; 586 unsigned long addr;
538 LIST_HEAD(pages); 587 LIST_HEAD(pages);
@@ -562,7 +611,10 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
562 if (size < buffer_size) { 611 if (size < buffer_size) {
563 612
564 /* easy case, just free pages */ 613 /* easy case, just free pages */
565 BUG_ON(nr_pages >= buffer->pages); 614 if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) {
615 mutex_unlock(&buffer->mutex);
616 return -1;
617 }
566 618
567 rm_pages = buffer->pages - nr_pages; 619 rm_pages = buffer->pages - nr_pages;
568 620
@@ -581,21 +633,26 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
581 * add these pages to the cpu_buffers. Otherwise we just free 633 * add these pages to the cpu_buffers. Otherwise we just free
582 * them all and return -ENOMEM; 634 * them all and return -ENOMEM;
583 */ 635 */
584 BUG_ON(nr_pages <= buffer->pages); 636 if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) {
637 mutex_unlock(&buffer->mutex);
638 return -1;
639 }
640
585 new_pages = nr_pages - buffer->pages; 641 new_pages = nr_pages - buffer->pages;
586 642
587 for_each_buffer_cpu(buffer, cpu) { 643 for_each_buffer_cpu(buffer, cpu) {
588 for (i = 0; i < new_pages; i++) { 644 for (i = 0; i < new_pages; i++) {
589 page = kzalloc_node(ALIGN(sizeof(*page), 645 bpage = kzalloc_node(ALIGN(sizeof(*bpage),
590 cache_line_size()), 646 cache_line_size()),
591 GFP_KERNEL, cpu_to_node(cpu)); 647 GFP_KERNEL, cpu_to_node(cpu));
592 if (!page) 648 if (!bpage)
593 goto free_pages; 649 goto free_pages;
594 list_add(&page->list, &pages); 650 list_add(&bpage->list, &pages);
595 addr = __get_free_page(GFP_KERNEL); 651 addr = __get_free_page(GFP_KERNEL);
596 if (!addr) 652 if (!addr)
597 goto free_pages; 653 goto free_pages;
598 page->page = (void *)addr; 654 bpage->page = (void *)addr;
655 rb_init_page(bpage->page);
599 } 656 }
600 } 657 }
601 658
@@ -604,7 +661,10 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
604 rb_insert_pages(cpu_buffer, &pages, new_pages); 661 rb_insert_pages(cpu_buffer, &pages, new_pages);
605 } 662 }
606 663
607 BUG_ON(!list_empty(&pages)); 664 if (RB_WARN_ON(buffer, !list_empty(&pages))) {
665 mutex_unlock(&buffer->mutex);
666 return -1;
667 }
608 668
609 out: 669 out:
610 buffer->pages = nr_pages; 670 buffer->pages = nr_pages;
@@ -613,9 +673,9 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
613 return size; 673 return size;
614 674
615 free_pages: 675 free_pages:
616 list_for_each_entry_safe(page, tmp, &pages, list) { 676 list_for_each_entry_safe(bpage, tmp, &pages, list) {
617 list_del_init(&page->list); 677 list_del_init(&bpage->list);
618 free_buffer_page(page); 678 free_buffer_page(bpage);
619 } 679 }
620 mutex_unlock(&buffer->mutex); 680 mutex_unlock(&buffer->mutex);
621 return -ENOMEM; 681 return -ENOMEM;
@@ -626,9 +686,15 @@ static inline int rb_null_event(struct ring_buffer_event *event)
626 return event->type == RINGBUF_TYPE_PADDING; 686 return event->type == RINGBUF_TYPE_PADDING;
627} 687}
628 688
629static inline void *__rb_page_index(struct buffer_page *page, unsigned index) 689static inline void *
690__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
691{
692 return bpage->data + index;
693}
694
695static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
630{ 696{
631 return page->page + index; 697 return bpage->page->data + index;
632} 698}
633 699
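The new __rb_data_page_index() helper and the bpage->page->data indirection reflect a split of the old flat buffer page into a small control structure plus a page-sized payload that can later be swapped out by ring_buffer_read_page(). A sketch of the two structures as this diff implies them (field order is an assumption):

struct buffer_data_page {
	u64		time_stamp;	/* page time stamp */
	local_t		commit;		/* write committed index */
	unsigned char	data[];		/* data of buffer page */
};

struct buffer_page {
	local_t			write;	/* index for next write */
	unsigned		read;	/* index for next read */
	struct list_head	list;	/* linked through the per-cpu page list */
	struct buffer_data_page	*page;	/* the actual data page */
};

static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}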
634static inline struct ring_buffer_event * 700static inline struct ring_buffer_event *
@@ -658,7 +724,7 @@ static inline unsigned rb_page_write(struct buffer_page *bpage)
658 724
659static inline unsigned rb_page_commit(struct buffer_page *bpage) 725static inline unsigned rb_page_commit(struct buffer_page *bpage)
660{ 726{
661 return local_read(&bpage->commit); 727 return local_read(&bpage->page->commit);
662} 728}
663 729
 664/* Size is determined by what has been committed */ 730
@@ -693,7 +759,8 @@ static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
693 head += rb_event_length(event)) { 759 head += rb_event_length(event)) {
694 760
695 event = __rb_page_index(cpu_buffer->head_page, head); 761 event = __rb_page_index(cpu_buffer->head_page, head);
696 BUG_ON(rb_null_event(event)); 762 if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
763 return;
697 /* Only count data entries */ 764 /* Only count data entries */
698 if (event->type != RINGBUF_TYPE_DATA) 765 if (event->type != RINGBUF_TYPE_DATA)
699 continue; 766 continue;
@@ -703,14 +770,14 @@ static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
703} 770}
704 771
705static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer, 772static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
706 struct buffer_page **page) 773 struct buffer_page **bpage)
707{ 774{
708 struct list_head *p = (*page)->list.next; 775 struct list_head *p = (*bpage)->list.next;
709 776
710 if (p == &cpu_buffer->pages) 777 if (p == &cpu_buffer->pages)
711 p = p->next; 778 p = p->next;
712 779
713 *page = list_entry(p, struct buffer_page, list); 780 *bpage = list_entry(p, struct buffer_page, list);
714} 781}
715 782
716static inline unsigned 783static inline unsigned
@@ -746,16 +813,18 @@ rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
746 addr &= PAGE_MASK; 813 addr &= PAGE_MASK;
747 814
748 while (cpu_buffer->commit_page->page != (void *)addr) { 815 while (cpu_buffer->commit_page->page != (void *)addr) {
749 RB_WARN_ON(cpu_buffer, 816 if (RB_WARN_ON(cpu_buffer,
750 cpu_buffer->commit_page == cpu_buffer->tail_page); 817 cpu_buffer->commit_page == cpu_buffer->tail_page))
751 cpu_buffer->commit_page->commit = 818 return;
819 cpu_buffer->commit_page->page->commit =
752 cpu_buffer->commit_page->write; 820 cpu_buffer->commit_page->write;
753 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); 821 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
754 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp; 822 cpu_buffer->write_stamp =
823 cpu_buffer->commit_page->page->time_stamp;
755 } 824 }
756 825
757 /* Now set the commit to the event's index */ 826 /* Now set the commit to the event's index */
758 local_set(&cpu_buffer->commit_page->commit, index); 827 local_set(&cpu_buffer->commit_page->page->commit, index);
759} 828}
760 829
761static inline void 830static inline void
@@ -769,25 +838,38 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
769 * back to us). This allows us to do a simple loop to 838 * back to us). This allows us to do a simple loop to
770 * assign the commit to the tail. 839 * assign the commit to the tail.
771 */ 840 */
841 again:
772 while (cpu_buffer->commit_page != cpu_buffer->tail_page) { 842 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
773 cpu_buffer->commit_page->commit = 843 cpu_buffer->commit_page->page->commit =
774 cpu_buffer->commit_page->write; 844 cpu_buffer->commit_page->write;
775 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); 845 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
776 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp; 846 cpu_buffer->write_stamp =
847 cpu_buffer->commit_page->page->time_stamp;
777 /* add barrier to keep gcc from optimizing too much */ 848 /* add barrier to keep gcc from optimizing too much */
778 barrier(); 849 barrier();
779 } 850 }
780 while (rb_commit_index(cpu_buffer) != 851 while (rb_commit_index(cpu_buffer) !=
781 rb_page_write(cpu_buffer->commit_page)) { 852 rb_page_write(cpu_buffer->commit_page)) {
782 cpu_buffer->commit_page->commit = 853 cpu_buffer->commit_page->page->commit =
783 cpu_buffer->commit_page->write; 854 cpu_buffer->commit_page->write;
784 barrier(); 855 barrier();
785 } 856 }
857
858 /* again, keep gcc from optimizing */
859 barrier();
860
861 /*
862 * If an interrupt came in just after the first while loop
863 * and pushed the tail page forward, we will be left with
864 * a dangling commit that will never go forward.
865 */
866 if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
867 goto again;
786} 868}
787 869
788static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) 870static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
789{ 871{
790 cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp; 872 cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
791 cpu_buffer->reader_page->read = 0; 873 cpu_buffer->reader_page->read = 0;
792} 874}
793 875
@@ -806,7 +888,7 @@ static inline void rb_inc_iter(struct ring_buffer_iter *iter)
806 else 888 else
807 rb_inc_page(cpu_buffer, &iter->head_page); 889 rb_inc_page(cpu_buffer, &iter->head_page);
808 890
809 iter->read_stamp = iter->head_page->time_stamp; 891 iter->read_stamp = iter->head_page->page->time_stamp;
810 iter->head = 0; 892 iter->head = 0;
811} 893}
812 894
@@ -880,12 +962,15 @@ static struct ring_buffer_event *
880__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, 962__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
881 unsigned type, unsigned long length, u64 *ts) 963 unsigned type, unsigned long length, u64 *ts)
882{ 964{
883 struct buffer_page *tail_page, *head_page, *reader_page; 965 struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
884 unsigned long tail, write; 966 unsigned long tail, write;
885 struct ring_buffer *buffer = cpu_buffer->buffer; 967 struct ring_buffer *buffer = cpu_buffer->buffer;
886 struct ring_buffer_event *event; 968 struct ring_buffer_event *event;
887 unsigned long flags; 969 unsigned long flags;
888 970
971 commit_page = cpu_buffer->commit_page;
972 /* we just need to protect against interrupts */
973 barrier();
889 tail_page = cpu_buffer->tail_page; 974 tail_page = cpu_buffer->tail_page;
890 write = local_add_return(length, &tail_page->write); 975 write = local_add_return(length, &tail_page->write);
891 tail = write - length; 976 tail = write - length;
@@ -894,7 +979,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
894 if (write > BUF_PAGE_SIZE) { 979 if (write > BUF_PAGE_SIZE) {
895 struct buffer_page *next_page = tail_page; 980 struct buffer_page *next_page = tail_page;
896 981
897 spin_lock_irqsave(&cpu_buffer->lock, flags); 982 local_irq_save(flags);
983 __raw_spin_lock(&cpu_buffer->lock);
898 984
899 rb_inc_page(cpu_buffer, &next_page); 985 rb_inc_page(cpu_buffer, &next_page);
900 986
@@ -902,14 +988,15 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
902 reader_page = cpu_buffer->reader_page; 988 reader_page = cpu_buffer->reader_page;
903 989
904 /* we grabbed the lock before incrementing */ 990 /* we grabbed the lock before incrementing */
905 RB_WARN_ON(cpu_buffer, next_page == reader_page); 991 if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
992 goto out_unlock;
906 993
907 /* 994 /*
908 * If for some reason, we had an interrupt storm that made 995 * If for some reason, we had an interrupt storm that made
909 * it all the way around the buffer, bail, and warn 996 * it all the way around the buffer, bail, and warn
910 * about it. 997 * about it.
911 */ 998 */
912 if (unlikely(next_page == cpu_buffer->commit_page)) { 999 if (unlikely(next_page == commit_page)) {
913 WARN_ON_ONCE(1); 1000 WARN_ON_ONCE(1);
914 goto out_unlock; 1001 goto out_unlock;
915 } 1002 }
@@ -940,12 +1027,12 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
940 */ 1027 */
941 if (tail_page == cpu_buffer->tail_page) { 1028 if (tail_page == cpu_buffer->tail_page) {
942 local_set(&next_page->write, 0); 1029 local_set(&next_page->write, 0);
943 local_set(&next_page->commit, 0); 1030 local_set(&next_page->page->commit, 0);
944 cpu_buffer->tail_page = next_page; 1031 cpu_buffer->tail_page = next_page;
945 1032
946 /* reread the time stamp */ 1033 /* reread the time stamp */
947 *ts = ring_buffer_time_stamp(cpu_buffer->cpu); 1034 *ts = ring_buffer_time_stamp(cpu_buffer->cpu);
948 cpu_buffer->tail_page->time_stamp = *ts; 1035 cpu_buffer->tail_page->page->time_stamp = *ts;
949 } 1036 }
950 1037
951 /* 1038 /*
@@ -970,7 +1057,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
970 rb_set_commit_to_write(cpu_buffer); 1057 rb_set_commit_to_write(cpu_buffer);
971 } 1058 }
972 1059
973 spin_unlock_irqrestore(&cpu_buffer->lock, flags); 1060 __raw_spin_unlock(&cpu_buffer->lock);
1061 local_irq_restore(flags);
974 1062
975 /* fail and let the caller try again */ 1063 /* fail and let the caller try again */
976 return ERR_PTR(-EAGAIN); 1064 return ERR_PTR(-EAGAIN);
@@ -978,7 +1066,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
978 1066
979 /* We reserved something on the buffer */ 1067 /* We reserved something on the buffer */
980 1068
981 BUG_ON(write > BUF_PAGE_SIZE); 1069 if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
1070 return NULL;
982 1071
983 event = __rb_page_index(tail_page, tail); 1072 event = __rb_page_index(tail_page, tail);
984 rb_update_event(event, type, length); 1073 rb_update_event(event, type, length);
@@ -988,12 +1077,13 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
988 * this page's time stamp. 1077 * this page's time stamp.
989 */ 1078 */
990 if (!tail && rb_is_commit(cpu_buffer, event)) 1079 if (!tail && rb_is_commit(cpu_buffer, event))
991 cpu_buffer->commit_page->time_stamp = *ts; 1080 cpu_buffer->commit_page->page->time_stamp = *ts;
992 1081
993 return event; 1082 return event;
994 1083
995 out_unlock: 1084 out_unlock:
996 spin_unlock_irqrestore(&cpu_buffer->lock, flags); 1085 __raw_spin_unlock(&cpu_buffer->lock);
1086 local_irq_restore(flags);
997 return NULL; 1087 return NULL;
998} 1088}
999 1089
@@ -1038,7 +1128,7 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1038 event->time_delta = *delta & TS_MASK; 1128 event->time_delta = *delta & TS_MASK;
1039 event->array[0] = *delta >> TS_SHIFT; 1129 event->array[0] = *delta >> TS_SHIFT;
1040 } else { 1130 } else {
1041 cpu_buffer->commit_page->time_stamp = *ts; 1131 cpu_buffer->commit_page->page->time_stamp = *ts;
1042 event->time_delta = 0; 1132 event->time_delta = 0;
1043 event->array[0] = 0; 1133 event->array[0] = 0;
1044 } 1134 }
@@ -1076,10 +1166,8 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1076 * storm or we have something buggy. 1166 * storm or we have something buggy.
1077 * Bail! 1167 * Bail!
1078 */ 1168 */
1079 if (unlikely(++nr_loops > 1000)) { 1169 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
1080 RB_WARN_ON(cpu_buffer, 1);
1081 return NULL; 1170 return NULL;
1082 }
1083 1171
1084 ts = ring_buffer_time_stamp(cpu_buffer->cpu); 1172 ts = ring_buffer_time_stamp(cpu_buffer->cpu);
1085 1173
@@ -1175,15 +1263,14 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
1175 struct ring_buffer_event *event; 1263 struct ring_buffer_event *event;
1176 int cpu, resched; 1264 int cpu, resched;
1177 1265
1178 if (ring_buffers_off) 1266 if (ring_buffer_flags != RB_BUFFERS_ON)
1179 return NULL; 1267 return NULL;
1180 1268
1181 if (atomic_read(&buffer->record_disabled)) 1269 if (atomic_read(&buffer->record_disabled))
1182 return NULL; 1270 return NULL;
1183 1271
1184 /* If we are tracing schedule, we don't want to recurse */ 1272 /* If we are tracing schedule, we don't want to recurse */
1185 resched = need_resched(); 1273 resched = ftrace_preempt_disable();
1186 preempt_disable_notrace();
1187 1274
1188 cpu = raw_smp_processor_id(); 1275 cpu = raw_smp_processor_id();
1189 1276
@@ -1214,10 +1301,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
1214 return event; 1301 return event;
1215 1302
1216 out: 1303 out:
1217 if (resched) 1304 ftrace_preempt_enable(resched);
1218 preempt_enable_no_resched_notrace();
1219 else
1220 preempt_enable_notrace();
1221 return NULL; 1305 return NULL;
1222} 1306}
1223 1307
@@ -1259,12 +1343,9 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1259 /* 1343 /*
1260 * Only the last preempt count needs to restore preemption. 1344 * Only the last preempt count needs to restore preemption.
1261 */ 1345 */
1262 if (preempt_count() == 1) { 1346 if (preempt_count() == 1)
1263 if (per_cpu(rb_need_resched, cpu)) 1347 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1264 preempt_enable_no_resched_notrace(); 1348 else
1265 else
1266 preempt_enable_notrace();
1267 } else
1268 preempt_enable_no_resched_notrace(); 1349 preempt_enable_no_resched_notrace();
1269 1350
1270 return 0; 1351 return 0;
@@ -1294,14 +1375,13 @@ int ring_buffer_write(struct ring_buffer *buffer,
1294 int ret = -EBUSY; 1375 int ret = -EBUSY;
1295 int cpu, resched; 1376 int cpu, resched;
1296 1377
1297 if (ring_buffers_off) 1378 if (ring_buffer_flags != RB_BUFFERS_ON)
1298 return -EBUSY; 1379 return -EBUSY;
1299 1380
1300 if (atomic_read(&buffer->record_disabled)) 1381 if (atomic_read(&buffer->record_disabled))
1301 return -EBUSY; 1382 return -EBUSY;
1302 1383
1303 resched = need_resched(); 1384 resched = ftrace_preempt_disable();
1304 preempt_disable_notrace();
1305 1385
1306 cpu = raw_smp_processor_id(); 1386 cpu = raw_smp_processor_id();
1307 1387
@@ -1327,10 +1407,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
1327 1407
1328 ret = 0; 1408 ret = 0;
1329 out: 1409 out:
1330 if (resched) 1410 ftrace_preempt_enable(resched);
1331 preempt_enable_no_resched_notrace();
1332 else
1333 preempt_enable_notrace();
1334 1411
1335 return ret; 1412 return ret;
1336} 1413}
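Both write paths now use the ftrace_preempt_disable()/ftrace_preempt_enable() pair instead of open-coding the need_resched() dance. The helpers come from kernel/trace/trace.h in the companion change (not part of this hunk); roughly:

/* Sketch of the assumed helpers (see trace.h). */
static inline int ftrace_preempt_disable(void)
{
	int resched = need_resched();

	preempt_disable_notrace();
	return resched;
}

static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}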
@@ -1489,14 +1566,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1489 return overruns; 1566 return overruns;
1490} 1567}
1491 1568
1492/** 1569static void rb_iter_reset(struct ring_buffer_iter *iter)
1493 * ring_buffer_iter_reset - reset an iterator
1494 * @iter: The iterator to reset
1495 *
1496 * Resets the iterator, so that it will start from the beginning
1497 * again.
1498 */
1499void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1500{ 1570{
1501 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 1571 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1502 1572
@@ -1511,7 +1581,24 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1511 if (iter->head) 1581 if (iter->head)
1512 iter->read_stamp = cpu_buffer->read_stamp; 1582 iter->read_stamp = cpu_buffer->read_stamp;
1513 else 1583 else
1514 iter->read_stamp = iter->head_page->time_stamp; 1584 iter->read_stamp = iter->head_page->page->time_stamp;
1585}
1586
1587/**
1588 * ring_buffer_iter_reset - reset an iterator
1589 * @iter: The iterator to reset
1590 *
1591 * Resets the iterator, so that it will start from the beginning
1592 * again.
1593 */
1594void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1595{
1596 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1597 unsigned long flags;
1598
1599 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1600 rb_iter_reset(iter);
1601 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1515} 1602}
1516 1603
1517/** 1604/**
@@ -1597,7 +1684,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1597 unsigned long flags; 1684 unsigned long flags;
1598 int nr_loops = 0; 1685 int nr_loops = 0;
1599 1686
1600 spin_lock_irqsave(&cpu_buffer->lock, flags); 1687 local_irq_save(flags);
1688 __raw_spin_lock(&cpu_buffer->lock);
1601 1689
1602 again: 1690 again:
1603 /* 1691 /*
@@ -1606,8 +1694,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1606 * a case where we will loop three times. There should be no 1694 * a case where we will loop three times. There should be no
1607 * reason to loop four times (that I know of). 1695 * reason to loop four times (that I know of).
1608 */ 1696 */
1609 if (unlikely(++nr_loops > 3)) { 1697 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
1610 RB_WARN_ON(cpu_buffer, 1);
1611 reader = NULL; 1698 reader = NULL;
1612 goto out; 1699 goto out;
1613 } 1700 }
@@ -1619,8 +1706,9 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1619 goto out; 1706 goto out;
1620 1707
1621 /* Never should we have an index greater than the size */ 1708 /* Never should we have an index greater than the size */
1622 RB_WARN_ON(cpu_buffer, 1709 if (RB_WARN_ON(cpu_buffer,
1623 cpu_buffer->reader_page->read > rb_page_size(reader)); 1710 cpu_buffer->reader_page->read > rb_page_size(reader)))
1711 goto out;
1624 1712
1625 /* check if we caught up to the tail */ 1713 /* check if we caught up to the tail */
1626 reader = NULL; 1714 reader = NULL;
@@ -1637,7 +1725,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1637 cpu_buffer->reader_page->list.prev = reader->list.prev; 1725 cpu_buffer->reader_page->list.prev = reader->list.prev;
1638 1726
1639 local_set(&cpu_buffer->reader_page->write, 0); 1727 local_set(&cpu_buffer->reader_page->write, 0);
1640 local_set(&cpu_buffer->reader_page->commit, 0); 1728 local_set(&cpu_buffer->reader_page->page->commit, 0);
1641 1729
1642 /* Make the reader page now replace the head */ 1730 /* Make the reader page now replace the head */
1643 reader->list.prev->next = &cpu_buffer->reader_page->list; 1731 reader->list.prev->next = &cpu_buffer->reader_page->list;
@@ -1659,7 +1747,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1659 goto again; 1747 goto again;
1660 1748
1661 out: 1749 out:
1662 spin_unlock_irqrestore(&cpu_buffer->lock, flags); 1750 __raw_spin_unlock(&cpu_buffer->lock);
1751 local_irq_restore(flags);
1663 1752
1664 return reader; 1753 return reader;
1665} 1754}
@@ -1673,7 +1762,8 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
1673 reader = rb_get_reader_page(cpu_buffer); 1762 reader = rb_get_reader_page(cpu_buffer);
1674 1763
1675 /* This function should not be called when buffer is empty */ 1764 /* This function should not be called when buffer is empty */
1676 BUG_ON(!reader); 1765 if (RB_WARN_ON(cpu_buffer, !reader))
1766 return;
1677 1767
1678 event = rb_reader_event(cpu_buffer); 1768 event = rb_reader_event(cpu_buffer);
1679 1769
@@ -1700,7 +1790,9 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
1700 * Check if we are at the end of the buffer. 1790 * Check if we are at the end of the buffer.
1701 */ 1791 */
1702 if (iter->head >= rb_page_size(iter->head_page)) { 1792 if (iter->head >= rb_page_size(iter->head_page)) {
1703 BUG_ON(iter->head_page == cpu_buffer->commit_page); 1793 if (RB_WARN_ON(buffer,
1794 iter->head_page == cpu_buffer->commit_page))
1795 return;
1704 rb_inc_iter(iter); 1796 rb_inc_iter(iter);
1705 return; 1797 return;
1706 } 1798 }
@@ -1713,8 +1805,10 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
1713 * This should not be called to advance the header if we are 1805 * This should not be called to advance the header if we are
1714 * at the tail of the buffer. 1806 * at the tail of the buffer.
1715 */ 1807 */
1716 BUG_ON((iter->head_page == cpu_buffer->commit_page) && 1808 if (RB_WARN_ON(cpu_buffer,
1717 (iter->head + length > rb_commit_index(cpu_buffer))); 1809 (iter->head_page == cpu_buffer->commit_page) &&
1810 (iter->head + length > rb_commit_index(cpu_buffer))))
1811 return;
1718 1812
1719 rb_update_iter_read_stamp(iter, event); 1813 rb_update_iter_read_stamp(iter, event);
1720 1814
@@ -1726,17 +1820,8 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
1726 rb_advance_iter(iter); 1820 rb_advance_iter(iter);
1727} 1821}
1728 1822
1729/** 1823static struct ring_buffer_event *
1730 * ring_buffer_peek - peek at the next event to be read 1824rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1731 * @buffer: The ring buffer to read
1732 * @cpu: The cpu to peak at
1733 * @ts: The timestamp counter of this event.
1734 *
1735 * This will return the event that will be read next, but does
1736 * not consume the data.
1737 */
1738struct ring_buffer_event *
1739ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1740{ 1825{
1741 struct ring_buffer_per_cpu *cpu_buffer; 1826 struct ring_buffer_per_cpu *cpu_buffer;
1742 struct ring_buffer_event *event; 1827 struct ring_buffer_event *event;
@@ -1757,10 +1842,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1757 * can have. Nesting 10 deep of interrupts is clearly 1842 * can have. Nesting 10 deep of interrupts is clearly
1758 * an anomaly. 1843 * an anomaly.
1759 */ 1844 */
1760 if (unlikely(++nr_loops > 10)) { 1845 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
1761 RB_WARN_ON(cpu_buffer, 1);
1762 return NULL; 1846 return NULL;
1763 }
1764 1847
1765 reader = rb_get_reader_page(cpu_buffer); 1848 reader = rb_get_reader_page(cpu_buffer);
1766 if (!reader) 1849 if (!reader)
@@ -1798,16 +1881,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1798 return NULL; 1881 return NULL;
1799} 1882}
1800 1883
1801/** 1884static struct ring_buffer_event *
1802 * ring_buffer_iter_peek - peek at the next event to be read 1885rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1803 * @iter: The ring buffer iterator
1804 * @ts: The timestamp counter of this event.
1805 *
1806 * This will return the event that will be read next, but does
1807 * not increment the iterator.
1808 */
1809struct ring_buffer_event *
1810ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1811{ 1886{
1812 struct ring_buffer *buffer; 1887 struct ring_buffer *buffer;
1813 struct ring_buffer_per_cpu *cpu_buffer; 1888 struct ring_buffer_per_cpu *cpu_buffer;
@@ -1829,10 +1904,8 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1829 * can have. Nesting 10 deep of interrupts is clearly 1904 * can have. Nesting 10 deep of interrupts is clearly
1830 * an anomaly. 1905 * an anomaly.
1831 */ 1906 */
1832 if (unlikely(++nr_loops > 10)) { 1907 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
1833 RB_WARN_ON(cpu_buffer, 1);
1834 return NULL; 1908 return NULL;
1835 }
1836 1909
1837 if (rb_per_cpu_empty(cpu_buffer)) 1910 if (rb_per_cpu_empty(cpu_buffer))
1838 return NULL; 1911 return NULL;
@@ -1869,6 +1942,51 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1869} 1942}
1870 1943
1871/** 1944/**
1945 * ring_buffer_peek - peek at the next event to be read
1946 * @buffer: The ring buffer to read
 1947 * @cpu: The cpu to peek at
1948 * @ts: The timestamp counter of this event.
1949 *
1950 * This will return the event that will be read next, but does
1951 * not consume the data.
1952 */
1953struct ring_buffer_event *
1954ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1955{
1956 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
1957 struct ring_buffer_event *event;
1958 unsigned long flags;
1959
1960 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1961 event = rb_buffer_peek(buffer, cpu, ts);
1962 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1963
1964 return event;
1965}
1966
1967/**
1968 * ring_buffer_iter_peek - peek at the next event to be read
1969 * @iter: The ring buffer iterator
1970 * @ts: The timestamp counter of this event.
1971 *
1972 * This will return the event that will be read next, but does
1973 * not increment the iterator.
1974 */
1975struct ring_buffer_event *
1976ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1977{
1978 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1979 struct ring_buffer_event *event;
1980 unsigned long flags;
1981
1982 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1983 event = rb_iter_peek(iter, ts);
1984 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1985
1986 return event;
1987}
1988
1989/**
1872 * ring_buffer_consume - return an event and consume it 1990 * ring_buffer_consume - return an event and consume it
1873 * @buffer: The ring buffer to get the next event from 1991 * @buffer: The ring buffer to get the next event from
1874 * 1992 *
@@ -1879,19 +1997,24 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1879struct ring_buffer_event * 1997struct ring_buffer_event *
1880ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts) 1998ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
1881{ 1999{
1882 struct ring_buffer_per_cpu *cpu_buffer; 2000 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
1883 struct ring_buffer_event *event; 2001 struct ring_buffer_event *event;
2002 unsigned long flags;
1884 2003
1885 if (!cpu_isset(cpu, buffer->cpumask)) 2004 if (!cpu_isset(cpu, buffer->cpumask))
1886 return NULL; 2005 return NULL;
1887 2006
1888 event = ring_buffer_peek(buffer, cpu, ts); 2007 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2008
2009 event = rb_buffer_peek(buffer, cpu, ts);
1889 if (!event) 2010 if (!event)
1890 return NULL; 2011 goto out;
1891 2012
1892 cpu_buffer = buffer->buffers[cpu];
1893 rb_advance_reader(cpu_buffer); 2013 rb_advance_reader(cpu_buffer);
1894 2014
2015 out:
2016 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2017
1895 return event; 2018 return event;
1896} 2019}
1897 2020
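With the reader_lock now taken inside ring_buffer_consume() itself, a caller can drain one CPU's events with a plain loop; a minimal sketch (process_event() is a hypothetical consumer, everything else is the API defined in this file):

static void drain_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts))) {
		void *data = ring_buffer_event_data(event);

		process_event(data, ring_buffer_event_length(event), ts);
	}
}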
@@ -1928,9 +2051,11 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
1928 atomic_inc(&cpu_buffer->record_disabled); 2051 atomic_inc(&cpu_buffer->record_disabled);
1929 synchronize_sched(); 2052 synchronize_sched();
1930 2053
1931 spin_lock_irqsave(&cpu_buffer->lock, flags); 2054 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1932 ring_buffer_iter_reset(iter); 2055 __raw_spin_lock(&cpu_buffer->lock);
1933 spin_unlock_irqrestore(&cpu_buffer->lock, flags); 2056 rb_iter_reset(iter);
2057 __raw_spin_unlock(&cpu_buffer->lock);
2058 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1934 2059
1935 return iter; 2060 return iter;
1936} 2061}
@@ -1962,12 +2087,17 @@ struct ring_buffer_event *
1962ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts) 2087ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
1963{ 2088{
1964 struct ring_buffer_event *event; 2089 struct ring_buffer_event *event;
2090 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2091 unsigned long flags;
1965 2092
1966 event = ring_buffer_iter_peek(iter, ts); 2093 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2094 event = rb_iter_peek(iter, ts);
1967 if (!event) 2095 if (!event)
1968 return NULL; 2096 goto out;
1969 2097
1970 rb_advance_iter(iter); 2098 rb_advance_iter(iter);
2099 out:
2100 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1971 2101
1972 return event; 2102 return event;
1973} 2103}
@@ -1987,7 +2117,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
1987 cpu_buffer->head_page 2117 cpu_buffer->head_page
1988 = list_entry(cpu_buffer->pages.next, struct buffer_page, list); 2118 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
1989 local_set(&cpu_buffer->head_page->write, 0); 2119 local_set(&cpu_buffer->head_page->write, 0);
1990 local_set(&cpu_buffer->head_page->commit, 0); 2120 local_set(&cpu_buffer->head_page->page->commit, 0);
1991 2121
1992 cpu_buffer->head_page->read = 0; 2122 cpu_buffer->head_page->read = 0;
1993 2123
@@ -1996,7 +2126,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
1996 2126
1997 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 2127 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1998 local_set(&cpu_buffer->reader_page->write, 0); 2128 local_set(&cpu_buffer->reader_page->write, 0);
1999 local_set(&cpu_buffer->reader_page->commit, 0); 2129 local_set(&cpu_buffer->reader_page->page->commit, 0);
2000 cpu_buffer->reader_page->read = 0; 2130 cpu_buffer->reader_page->read = 0;
2001 2131
2002 cpu_buffer->overrun = 0; 2132 cpu_buffer->overrun = 0;
@@ -2016,11 +2146,15 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2016 if (!cpu_isset(cpu, buffer->cpumask)) 2146 if (!cpu_isset(cpu, buffer->cpumask))
2017 return; 2147 return;
2018 2148
2019 spin_lock_irqsave(&cpu_buffer->lock, flags); 2149 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2150
2151 __raw_spin_lock(&cpu_buffer->lock);
2020 2152
2021 rb_reset_cpu(cpu_buffer); 2153 rb_reset_cpu(cpu_buffer);
2022 2154
2023 spin_unlock_irqrestore(&cpu_buffer->lock, flags); 2155 __raw_spin_unlock(&cpu_buffer->lock);
2156
2157 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2024} 2158}
2025 2159
2026/** 2160/**
@@ -2090,8 +2224,7 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2090 return -EINVAL; 2224 return -EINVAL;
2091 2225
2092 /* At least make sure the two buffers are somewhat the same */ 2226 /* At least make sure the two buffers are somewhat the same */
2093 if (buffer_a->size != buffer_b->size || 2227 if (buffer_a->pages != buffer_b->pages)
2094 buffer_a->pages != buffer_b->pages)
2095 return -EINVAL; 2228 return -EINVAL;
2096 2229
2097 cpu_buffer_a = buffer_a->buffers[cpu]; 2230 cpu_buffer_a = buffer_a->buffers[cpu];
@@ -2118,16 +2251,178 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2118 return 0; 2251 return 0;
2119} 2252}
2120 2253
2254static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
2255 struct buffer_data_page *bpage)
2256{
2257 struct ring_buffer_event *event;
2258 unsigned long head;
2259
2260 __raw_spin_lock(&cpu_buffer->lock);
2261 for (head = 0; head < local_read(&bpage->commit);
2262 head += rb_event_length(event)) {
2263
2264 event = __rb_data_page_index(bpage, head);
2265 if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
2266 return;
2267 /* Only count data entries */
2268 if (event->type != RINGBUF_TYPE_DATA)
2269 continue;
2270 cpu_buffer->entries--;
2271 }
2272 __raw_spin_unlock(&cpu_buffer->lock);
2273}
2274
2275/**
2276 * ring_buffer_alloc_read_page - allocate a page to read from buffer
2277 * @buffer: the buffer to allocate for.
2278 *
2279 * This function is used in conjunction with ring_buffer_read_page.
2280 * When reading a full page from the ring buffer, these functions
2281 * can be used to speed up the process. The calling function should
2282 * allocate a few pages first with this function. Then when it
2283 * needs to get pages from the ring buffer, it passes the result
2284 * of this function into ring_buffer_read_page, which will swap
 2285 * the page that was allocated with the read page of the buffer.
2286 *
2287 * Returns:
2288 * The page allocated, or NULL on error.
2289 */
2290void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
2291{
2292 unsigned long addr;
2293 struct buffer_data_page *bpage;
2294
2295 addr = __get_free_page(GFP_KERNEL);
2296 if (!addr)
2297 return NULL;
2298
2299 bpage = (void *)addr;
2300
2301 return bpage;
2302}
2303
2304/**
2305 * ring_buffer_free_read_page - free an allocated read page
 2307 * @buffer: the buffer the page was allocated for
2307 * @data: the page to free
2308 *
2309 * Free a page allocated from ring_buffer_alloc_read_page.
2310 */
2311void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
2312{
2313 free_page((unsigned long)data);
2314}
2315
2316/**
2317 * ring_buffer_read_page - extract a page from the ring buffer
2318 * @buffer: buffer to extract from
2319 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
2320 * @cpu: the cpu of the buffer to extract
2321 * @full: should the extraction only happen when the page is full.
2322 *
2323 * This function will pull out a page from the ring buffer and consume it.
2324 * @data_page must be the address of the variable that was returned
2325 * from ring_buffer_alloc_read_page. This is because the page might be used
2326 * to swap with a page in the ring buffer.
2327 *
2328 * for example:
 2329 * rpage = ring_buffer_alloc_read_page(buffer);
2330 * if (!rpage)
2331 * return error;
2332 * ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
2333 * if (ret)
2334 * process_page(rpage);
2335 *
2336 * When @full is set, the function will not return true unless
2337 * the writer is off the reader page.
2338 *
2339 * Note: it is up to the calling functions to handle sleeps and wakeups.
2340 * The ring buffer can be used anywhere in the kernel and can not
2341 * blindly call wake_up. The layer that uses the ring buffer must be
2342 * responsible for that.
2343 *
2344 * Returns:
2345 * 1 if data has been transferred
2346 * 0 if no data has been transferred.
2347 */
2348int ring_buffer_read_page(struct ring_buffer *buffer,
2349 void **data_page, int cpu, int full)
2350{
2351 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2352 struct ring_buffer_event *event;
2353 struct buffer_data_page *bpage;
2354 unsigned long flags;
2355 int ret = 0;
2356
2357 if (!data_page)
2358 return 0;
2359
2360 bpage = *data_page;
2361 if (!bpage)
2362 return 0;
2363
2364 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2365
2366 /*
2367 * rb_buffer_peek will get the next ring buffer if
2368 * the current reader page is empty.
2369 */
2370 event = rb_buffer_peek(buffer, cpu, NULL);
2371 if (!event)
2372 goto out;
2373
2374 /* check for data */
2375 if (!local_read(&cpu_buffer->reader_page->page->commit))
2376 goto out;
2377 /*
2378 * If the writer is already off of the read page, then simply
2379 * switch the read page with the given page. Otherwise
2380 * we need to copy the data from the reader to the writer.
2381 */
2382 if (cpu_buffer->reader_page == cpu_buffer->commit_page) {
2383 unsigned int read = cpu_buffer->reader_page->read;
2384
2385 if (full)
2386 goto out;
2387 /* The writer is still on the reader page, we must copy */
2388 bpage = cpu_buffer->reader_page->page;
2389 memcpy(bpage->data,
2390 cpu_buffer->reader_page->page->data + read,
2391 local_read(&bpage->commit) - read);
2392
2393 /* consume what was read */
2394 cpu_buffer->reader_page += read;
2395
2396 } else {
2397 /* swap the pages */
2398 rb_init_page(bpage);
2399 bpage = cpu_buffer->reader_page->page;
2400 cpu_buffer->reader_page->page = *data_page;
2401 cpu_buffer->reader_page->read = 0;
2402 *data_page = bpage;
2403 }
2404 ret = 1;
2405
2406 /* update the entry counter */
2407 rb_remove_entries(cpu_buffer, bpage);
2408 out:
2409 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2410
2411 return ret;
2412}
2413
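Putting the three new entry points together, a page-based reader would follow roughly this pattern (process_page() is hypothetical, error handling trimmed):

/* Sketch: allocate a spare page, pull one page of data, free the spare. */
static int read_one_page(struct ring_buffer *buffer, int cpu)
{
	void *rpage = ring_buffer_alloc_read_page(buffer);
	int ret = 0;

	if (!rpage)
		return -ENOMEM;

	/* @full == 0: accept a partially filled reader page as well */
	if (ring_buffer_read_page(buffer, &rpage, cpu, 0)) {
		process_page(rpage);
		ret = 1;
	}

	ring_buffer_free_read_page(buffer, rpage);
	return ret;
}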
2121static ssize_t 2414static ssize_t
2122rb_simple_read(struct file *filp, char __user *ubuf, 2415rb_simple_read(struct file *filp, char __user *ubuf,
2123 size_t cnt, loff_t *ppos) 2416 size_t cnt, loff_t *ppos)
2124{ 2417{
2125 int *p = filp->private_data; 2418 long *p = filp->private_data;
2126 char buf[64]; 2419 char buf[64];
2127 int r; 2420 int r;
2128 2421
2129 /* !ring_buffers_off == tracing_on */ 2422 if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
2130 r = sprintf(buf, "%d\n", !*p); 2423 r = sprintf(buf, "permanently disabled\n");
2424 else
2425 r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
2131 2426
2132 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 2427 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2133} 2428}
@@ -2136,7 +2431,7 @@ static ssize_t
2136rb_simple_write(struct file *filp, const char __user *ubuf, 2431rb_simple_write(struct file *filp, const char __user *ubuf,
2137 size_t cnt, loff_t *ppos) 2432 size_t cnt, loff_t *ppos)
2138{ 2433{
2139 int *p = filp->private_data; 2434 long *p = filp->private_data;
2140 char buf[64]; 2435 char buf[64];
2141 long val; 2436 long val;
2142 int ret; 2437 int ret;
@@ -2153,8 +2448,10 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
2153 if (ret < 0) 2448 if (ret < 0)
2154 return ret; 2449 return ret;
2155 2450
2156 /* !ring_buffers_off == tracing_on */ 2451 if (val)
2157 *p = !val; 2452 set_bit(RB_BUFFERS_ON_BIT, p);
2453 else
2454 clear_bit(RB_BUFFERS_ON_BIT, p);
2158 2455
2159 (*ppos)++; 2456 (*ppos)++;
2160 2457
@@ -2176,7 +2473,7 @@ static __init int rb_init_debugfs(void)
2176 d_tracer = tracing_init_dentry(); 2473 d_tracer = tracing_init_dentry();
2177 2474
2178 entry = debugfs_create_file("tracing_on", 0644, d_tracer, 2475 entry = debugfs_create_file("tracing_on", 0644, d_tracer,
2179 &ring_buffers_off, &rb_simple_fops); 2476 &ring_buffer_flags, &rb_simple_fops);
2180 if (!entry) 2477 if (!entry)
2181 pr_warning("Could not create debugfs 'tracing_on' entry\n"); 2478 pr_warning("Could not create debugfs 'tracing_on' entry\n");
2182 2479
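The tracing_on debugfs file now operates on a bit mask rather than the old ring_buffers_off integer: RB_BUFFERS_ON_BIT is what user space toggles, while RB_BUFFERS_DISABLED_BIT (set by tracing_off_permanent()) wins unconditionally. The flag word is declared near the top of ring_buffer.c and is assumed to look roughly like:

enum {
	RB_BUFFERS_ON_BIT	= 0,	/* toggled via debugfs "tracing_on" */
	RB_BUFFERS_DISABLED_BIT	= 1,	/* set once, never cleared */
};

enum {
	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
};

static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;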
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1ee9e4e454a0..f4bb3800318b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -30,6 +30,7 @@
30#include <linux/gfp.h> 30#include <linux/gfp.h>
31#include <linux/fs.h> 31#include <linux/fs.h>
32#include <linux/kprobes.h> 32#include <linux/kprobes.h>
33#include <linux/seq_file.h>
33#include <linux/writeback.h> 34#include <linux/writeback.h>
34 35
35#include <linux/stacktrace.h> 36#include <linux/stacktrace.h>
@@ -43,6 +44,38 @@
43unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX; 44unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
44unsigned long __read_mostly tracing_thresh; 45unsigned long __read_mostly tracing_thresh;
45 46
47/*
48 * We need to change this state when a selftest is running.
 49 * A selftest will look into the ring-buffer to count the
 50 * entries inserted during the selftest, although some concurrent
 51 * insertions into the ring-buffer, such as ftrace_printk, could occur
52 * at the same time, giving false positive or negative results.
53 */
54static bool __read_mostly tracing_selftest_running;
55
56/* For tracers that don't implement custom flags */
57static struct tracer_opt dummy_tracer_opt[] = {
58 { }
59};
60
61static struct tracer_flags dummy_tracer_flags = {
62 .val = 0,
63 .opts = dummy_tracer_opt
64};
65
66static int dummy_set_flag(u32 old_flags, u32 bit, int set)
67{
68 return 0;
69}
70
71/*
72 * Kill all tracing for good (never come back).
73 * It is initialized to 1 but will turn to zero if the initialization
74 * of the tracer is successful. But that is the only place that sets
75 * this back to zero.
76 */
77int tracing_disabled = 1;
78
46static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled); 79static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
47 80
48static inline void ftrace_disable_cpu(void) 81static inline void ftrace_disable_cpu(void)
@@ -62,7 +95,36 @@ static cpumask_t __read_mostly tracing_buffer_mask;
62#define for_each_tracing_cpu(cpu) \ 95#define for_each_tracing_cpu(cpu) \
63 for_each_cpu_mask(cpu, tracing_buffer_mask) 96 for_each_cpu_mask(cpu, tracing_buffer_mask)
64 97
65static int tracing_disabled = 1; 98/*
99 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
100 *
101 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
102 * is set, then ftrace_dump is called. This will output the contents
103 * of the ftrace buffers to the console. This is very useful for
 104 * capturing traces that lead to crashes and outputting them to a
105 * serial console.
106 *
 107 * It is off by default, but you can enable it either by specifying
 108 * "ftrace_dump_on_oops" on the kernel command line, or by setting
109 * /proc/sys/kernel/ftrace_dump_on_oops to true.
110 */
111int ftrace_dump_on_oops;
112
113static int tracing_set_tracer(char *buf);
114
115static int __init set_ftrace(char *str)
116{
117 tracing_set_tracer(str);
118 return 1;
119}
120__setup("ftrace", set_ftrace);
121
122static int __init set_ftrace_dump_on_oops(char *str)
123{
124 ftrace_dump_on_oops = 1;
125 return 1;
126}
127__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
66 128
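The two __setup() hooks make both knobs usable from the boot command line, and the comment above already names the run-time sysctl; for example (the tracer name can be any registered tracer, "nop" here is only a placeholder):

# boot command line: pick the initial tracer and dump the buffers on an oops
ftrace=nop ftrace_dump_on_oops

# run time: enable the dump-on-oops behaviour later
echo 1 > /proc/sys/kernel/ftrace_dump_on_oops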
67long 129long
68ns2usecs(cycle_t nsec) 130ns2usecs(cycle_t nsec)
@@ -112,6 +174,19 @@ static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
112/* tracer_enabled is used to toggle activation of a tracer */ 174/* tracer_enabled is used to toggle activation of a tracer */
113static int tracer_enabled = 1; 175static int tracer_enabled = 1;
114 176
177/**
178 * tracing_is_enabled - return tracer_enabled status
179 *
180 * This function is used by other tracers to know the status
181 * of the tracer_enabled flag. Tracers may use this function
182 * to know if it should enable their features when starting
183 * up. See irqsoff tracer for an example (start_irqsoff_tracer).
184 */
185int tracing_is_enabled(void)
186{
187 return tracer_enabled;
188}
189
115/* function tracing enabled */ 190/* function tracing enabled */
116int ftrace_function_enabled; 191int ftrace_function_enabled;
117 192
@@ -153,8 +228,9 @@ static DEFINE_MUTEX(trace_types_lock);
153/* trace_wait is a waitqueue for tasks blocked on trace_poll */ 228/* trace_wait is a waitqueue for tasks blocked on trace_poll */
154static DECLARE_WAIT_QUEUE_HEAD(trace_wait); 229static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
155 230
156/* trace_flags holds iter_ctrl options */ 231/* trace_flags holds trace_options default values */
157unsigned long trace_flags = TRACE_ITER_PRINT_PARENT; 232unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
233 TRACE_ITER_ANNOTATE;
158 234
159/** 235/**
160 * trace_wake_up - wake up tasks waiting for trace input 236 * trace_wake_up - wake up tasks waiting for trace input
@@ -193,13 +269,6 @@ unsigned long nsecs_to_usecs(unsigned long nsecs)
193 return nsecs / 1000; 269 return nsecs / 1000;
194} 270}
195 271
196/*
197 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
198 * control the output of kernel symbols.
199 */
200#define TRACE_ITER_SYM_MASK \
201 (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
202
 203/* These must match the bit positions in trace_iterator_flags */ 272
204static const char *trace_options[] = { 273static const char *trace_options[] = {
205 "print-parent", 274 "print-parent",
@@ -213,6 +282,12 @@ static const char *trace_options[] = {
213 "stacktrace", 282 "stacktrace",
214 "sched-tree", 283 "sched-tree",
215 "ftrace_printk", 284 "ftrace_printk",
285 "ftrace_preempt",
286 "branch",
287 "annotate",
288 "userstacktrace",
289 "sym-userobj",
290 "printk-msg-only",
216 NULL 291 NULL
217}; 292};
218 293
@@ -359,6 +434,28 @@ trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
359 return trace_seq_putmem(s, hex, j); 434 return trace_seq_putmem(s, hex, j);
360} 435}
361 436
437static int
438trace_seq_path(struct trace_seq *s, struct path *path)
439{
440 unsigned char *p;
441
442 if (s->len >= (PAGE_SIZE - 1))
443 return 0;
444 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
445 if (!IS_ERR(p)) {
446 p = mangle_path(s->buffer + s->len, p, "\n");
447 if (p) {
448 s->len = p - s->buffer;
449 return 1;
450 }
451 } else {
452 s->buffer[s->len++] = '?';
453 return 1;
454 }
455
456 return 0;
457}
458
362static void 459static void
363trace_seq_reset(struct trace_seq *s) 460trace_seq_reset(struct trace_seq *s)
364{ 461{
@@ -470,7 +567,17 @@ int register_tracer(struct tracer *type)
470 return -1; 567 return -1;
471 } 568 }
472 569
570 /*
571 * When this gets called we hold the BKL which means that
572 * preemption is disabled. Various trace selftests however
573 * need to disable and enable preemption for successful tests.
574 * So we drop the BKL here and grab it after the tests again.
575 */
576 unlock_kernel();
473 mutex_lock(&trace_types_lock); 577 mutex_lock(&trace_types_lock);
578
579 tracing_selftest_running = true;
580
474 for (t = trace_types; t; t = t->next) { 581 for (t = trace_types; t; t = t->next) {
475 if (strcmp(type->name, t->name) == 0) { 582 if (strcmp(type->name, t->name) == 0) {
476 /* already found */ 583 /* already found */
@@ -481,12 +588,20 @@ int register_tracer(struct tracer *type)
481 } 588 }
482 } 589 }
483 590
591 if (!type->set_flag)
592 type->set_flag = &dummy_set_flag;
593 if (!type->flags)
594 type->flags = &dummy_tracer_flags;
595 else
596 if (!type->flags->opts)
597 type->flags->opts = dummy_tracer_opt;
598
484#ifdef CONFIG_FTRACE_STARTUP_TEST 599#ifdef CONFIG_FTRACE_STARTUP_TEST
485 if (type->selftest) { 600 if (type->selftest) {
486 struct tracer *saved_tracer = current_trace; 601 struct tracer *saved_tracer = current_trace;
487 struct trace_array *tr = &global_trace; 602 struct trace_array *tr = &global_trace;
488 int saved_ctrl = tr->ctrl;
489 int i; 603 int i;
604
490 /* 605 /*
491 * Run a selftest on this tracer. 606 * Run a selftest on this tracer.
492 * Here we reset the trace buffer, and set the current 607 * Here we reset the trace buffer, and set the current
@@ -494,25 +609,23 @@ int register_tracer(struct tracer *type)
494 * internal tracing to verify that everything is in order. 609 * internal tracing to verify that everything is in order.
495 * If we fail, we do not register this tracer. 610 * If we fail, we do not register this tracer.
496 */ 611 */
497 for_each_tracing_cpu(i) { 612 for_each_tracing_cpu(i)
498 tracing_reset(tr, i); 613 tracing_reset(tr, i);
499 } 614
500 current_trace = type; 615 current_trace = type;
501 tr->ctrl = 0;
502 /* the test is responsible for initializing and enabling */ 616 /* the test is responsible for initializing and enabling */
503 pr_info("Testing tracer %s: ", type->name); 617 pr_info("Testing tracer %s: ", type->name);
504 ret = type->selftest(type, tr); 618 ret = type->selftest(type, tr);
505 /* the test is responsible for resetting too */ 619 /* the test is responsible for resetting too */
506 current_trace = saved_tracer; 620 current_trace = saved_tracer;
507 tr->ctrl = saved_ctrl;
508 if (ret) { 621 if (ret) {
509 printk(KERN_CONT "FAILED!\n"); 622 printk(KERN_CONT "FAILED!\n");
510 goto out; 623 goto out;
511 } 624 }
512 /* Only reset on passing, to avoid touching corrupted buffers */ 625 /* Only reset on passing, to avoid touching corrupted buffers */
513 for_each_tracing_cpu(i) { 626 for_each_tracing_cpu(i)
514 tracing_reset(tr, i); 627 tracing_reset(tr, i);
515 } 628
516 printk(KERN_CONT "PASSED\n"); 629 printk(KERN_CONT "PASSED\n");
517 } 630 }
518#endif 631#endif
@@ -524,7 +637,9 @@ int register_tracer(struct tracer *type)
524 max_tracer_type_len = len; 637 max_tracer_type_len = len;
525 638
526 out: 639 out:
640 tracing_selftest_running = false;
527 mutex_unlock(&trace_types_lock); 641 mutex_unlock(&trace_types_lock);
642 lock_kernel();
528 643
529 return ret; 644 return ret;
530} 645}
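register_tracer() now guarantees that every tracer has a usable set_flag() callback and flags table by falling back to the dummy ones defined above. The types involved are declared in kernel/trace/trace.h (not part of this hunk); a sketch:

/* Assumed declarations from trace.h. */
struct tracer_opt {
	const char	*name;		/* option name shown in trace_options */
	u32		bit;		/* bit toggled in tracer_flags->val */
};

struct tracer_flags {
	u32			val;	/* currently enabled option bits */
	struct tracer_opt	*opts;	/* terminated by an entry with a NULL name */
};

/* per-tracer hook, called when one of its private options is flipped: */
/*	int (*set_flag)(u32 old_flags, u32 bit, int set);		  */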
@@ -564,6 +679,16 @@ void tracing_reset(struct trace_array *tr, int cpu)
564 ftrace_enable_cpu(); 679 ftrace_enable_cpu();
565} 680}
566 681
682void tracing_reset_online_cpus(struct trace_array *tr)
683{
684 int cpu;
685
686 tr->time_start = ftrace_now(tr->cpu);
687
688 for_each_online_cpu(cpu)
689 tracing_reset(tr, cpu);
690}
691
567#define SAVED_CMDLINES 128 692#define SAVED_CMDLINES 128
568static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; 693static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
569static unsigned map_cmdline_to_pid[SAVED_CMDLINES]; 694static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
@@ -581,6 +706,91 @@ static void trace_init_cmdlines(void)
581 cmdline_idx = 0; 706 cmdline_idx = 0;
582} 707}
583 708
709static int trace_stop_count;
710static DEFINE_SPINLOCK(tracing_start_lock);
711
712/**
713 * ftrace_off_permanent - disable all ftrace code permanently
714 *
 715 * This should only be called when a serious anomaly has
716 * been detected. This will turn off the function tracing,
 717 * ring buffers, and other tracing utilities. It takes no
718 * locks and can be called from any context.
719 */
720void ftrace_off_permanent(void)
721{
722 tracing_disabled = 1;
723 ftrace_stop();
724 tracing_off_permanent();
725}
726
727/**
728 * tracing_start - quick start of the tracer
729 *
730 * If tracing is enabled but was stopped by tracing_stop,
731 * this will start the tracer back up.
732 */
733void tracing_start(void)
734{
735 struct ring_buffer *buffer;
736 unsigned long flags;
737
738 if (tracing_disabled)
739 return;
740
741 spin_lock_irqsave(&tracing_start_lock, flags);
742 if (--trace_stop_count)
743 goto out;
744
745 if (trace_stop_count < 0) {
746 /* Someone screwed up their debugging */
747 WARN_ON_ONCE(1);
748 trace_stop_count = 0;
749 goto out;
750 }
751
752
753 buffer = global_trace.buffer;
754 if (buffer)
755 ring_buffer_record_enable(buffer);
756
757 buffer = max_tr.buffer;
758 if (buffer)
759 ring_buffer_record_enable(buffer);
760
761 ftrace_start();
762 out:
763 spin_unlock_irqrestore(&tracing_start_lock, flags);
764}
765
766/**
767 * tracing_stop - quick stop of the tracer
768 *
 769 * Lightweight way to stop tracing. Use in conjunction with
770 * tracing_start.
771 */
772void tracing_stop(void)
773{
774 struct ring_buffer *buffer;
775 unsigned long flags;
776
777 ftrace_stop();
778 spin_lock_irqsave(&tracing_start_lock, flags);
779 if (trace_stop_count++)
780 goto out;
781
782 buffer = global_trace.buffer;
783 if (buffer)
784 ring_buffer_record_disable(buffer);
785
786 buffer = max_tr.buffer;
787 if (buffer)
788 ring_buffer_record_disable(buffer);
789
790 out:
791 spin_unlock_irqrestore(&tracing_start_lock, flags);
792}
793
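A minimal sketch of how the counted stop/start pair is meant to be used inside trace.c, mirroring what tracing_entries_write does later in this patch; the function name and the omitted error handling are illustrative only:

static int example_resize_buffer(unsigned long size_kb)
{
	int ret;

	tracing_stop();		/* bumps trace_stop_count and disables recording */
	ret = ring_buffer_resize(global_trace.buffer, size_kb << 10);
	tracing_start();	/* recording resumes once the count drops back to zero */

	return ret;
}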
584void trace_stop_cmdline_recording(void); 794void trace_stop_cmdline_recording(void);
585 795
586static void trace_save_cmdline(struct task_struct *tsk) 796static void trace_save_cmdline(struct task_struct *tsk)
@@ -618,7 +828,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
618 spin_unlock(&trace_cmdline_lock); 828 spin_unlock(&trace_cmdline_lock);
619} 829}
620 830
621static char *trace_find_cmdline(int pid) 831char *trace_find_cmdline(int pid)
622{ 832{
623 char *cmdline = "<...>"; 833 char *cmdline = "<...>";
624 unsigned map; 834 unsigned map;
@@ -655,6 +865,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
655 865
656 entry->preempt_count = pc & 0xff; 866 entry->preempt_count = pc & 0xff;
657 entry->pid = (tsk) ? tsk->pid : 0; 867 entry->pid = (tsk) ? tsk->pid : 0;
868 entry->tgid = (tsk) ? tsk->tgid : 0;
658 entry->flags = 869 entry->flags =
659#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT 870#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
660 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | 871 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
@@ -691,6 +902,56 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
691 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 902 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
692} 903}
693 904
905#ifdef CONFIG_FUNCTION_GRAPH_TRACER
906static void __trace_graph_entry(struct trace_array *tr,
907 struct trace_array_cpu *data,
908 struct ftrace_graph_ent *trace,
909 unsigned long flags,
910 int pc)
911{
912 struct ring_buffer_event *event;
913 struct ftrace_graph_ent_entry *entry;
914 unsigned long irq_flags;
915
916 if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
917 return;
918
919 event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
920 &irq_flags);
921 if (!event)
922 return;
923 entry = ring_buffer_event_data(event);
924 tracing_generic_entry_update(&entry->ent, flags, pc);
925 entry->ent.type = TRACE_GRAPH_ENT;
926 entry->graph_ent = *trace;
927 ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
928}
929
930static void __trace_graph_return(struct trace_array *tr,
931 struct trace_array_cpu *data,
932 struct ftrace_graph_ret *trace,
933 unsigned long flags,
934 int pc)
935{
936 struct ring_buffer_event *event;
937 struct ftrace_graph_ret_entry *entry;
938 unsigned long irq_flags;
939
940 if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
941 return;
942
943 event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
944 &irq_flags);
945 if (!event)
946 return;
947 entry = ring_buffer_event_data(event);
948 tracing_generic_entry_update(&entry->ent, flags, pc);
949 entry->ent.type = TRACE_GRAPH_RET;
950 entry->ret = *trace;
951 ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
952}
953#endif
954
694void 955void
695ftrace(struct trace_array *tr, struct trace_array_cpu *data, 956ftrace(struct trace_array *tr, struct trace_array_cpu *data,
696 unsigned long ip, unsigned long parent_ip, unsigned long flags, 957 unsigned long ip, unsigned long parent_ip, unsigned long flags,
@@ -742,6 +1003,46 @@ void __trace_stack(struct trace_array *tr,
742 ftrace_trace_stack(tr, data, flags, skip, preempt_count()); 1003 ftrace_trace_stack(tr, data, flags, skip, preempt_count());
743} 1004}
744 1005
1006static void ftrace_trace_userstack(struct trace_array *tr,
1007 struct trace_array_cpu *data,
1008 unsigned long flags, int pc)
1009{
1010#ifdef CONFIG_STACKTRACE
1011 struct ring_buffer_event *event;
1012 struct userstack_entry *entry;
1013 struct stack_trace trace;
1014 unsigned long irq_flags;
1015
1016 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1017 return;
1018
1019 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
1020 &irq_flags);
1021 if (!event)
1022 return;
1023 entry = ring_buffer_event_data(event);
1024 tracing_generic_entry_update(&entry->ent, flags, pc);
1025 entry->ent.type = TRACE_USER_STACK;
1026
1027 memset(&entry->caller, 0, sizeof(entry->caller));
1028
1029 trace.nr_entries = 0;
1030 trace.max_entries = FTRACE_STACK_ENTRIES;
1031 trace.skip = 0;
1032 trace.entries = entry->caller;
1033
1034 save_stack_trace_user(&trace);
1035 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
1036#endif
1037}
1038
1039void __trace_userstack(struct trace_array *tr,
1040 struct trace_array_cpu *data,
1041 unsigned long flags)
1042{
1043 ftrace_trace_userstack(tr, data, flags, preempt_count());
1044}
1045
745static void 1046static void
746ftrace_trace_special(void *__tr, void *__data, 1047ftrace_trace_special(void *__tr, void *__data,
747 unsigned long arg1, unsigned long arg2, unsigned long arg3, 1048 unsigned long arg1, unsigned long arg2, unsigned long arg3,
@@ -765,6 +1066,7 @@ ftrace_trace_special(void *__tr, void *__data,
765 entry->arg3 = arg3; 1066 entry->arg3 = arg3;
766 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 1067 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
767 ftrace_trace_stack(tr, data, irq_flags, 4, pc); 1068 ftrace_trace_stack(tr, data, irq_flags, 4, pc);
1069 ftrace_trace_userstack(tr, data, irq_flags, pc);
768 1070
769 trace_wake_up(); 1071 trace_wake_up();
770} 1072}
@@ -803,6 +1105,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
803 entry->next_cpu = task_cpu(next); 1105 entry->next_cpu = task_cpu(next);
804 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 1106 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
805 ftrace_trace_stack(tr, data, flags, 5, pc); 1107 ftrace_trace_stack(tr, data, flags, 5, pc);
1108 ftrace_trace_userstack(tr, data, flags, pc);
806} 1109}
807 1110
808void 1111void
@@ -832,6 +1135,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
832 entry->next_cpu = task_cpu(wakee); 1135 entry->next_cpu = task_cpu(wakee);
833 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 1136 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
834 ftrace_trace_stack(tr, data, flags, 6, pc); 1137 ftrace_trace_stack(tr, data, flags, 6, pc);
1138 ftrace_trace_userstack(tr, data, flags, pc);
835 1139
836 trace_wake_up(); 1140 trace_wake_up();
837} 1141}
@@ -841,26 +1145,28 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
841{ 1145{
842 struct trace_array *tr = &global_trace; 1146 struct trace_array *tr = &global_trace;
843 struct trace_array_cpu *data; 1147 struct trace_array_cpu *data;
1148 unsigned long flags;
844 int cpu; 1149 int cpu;
845 int pc; 1150 int pc;
846 1151
847 if (tracing_disabled || !tr->ctrl) 1152 if (tracing_disabled)
848 return; 1153 return;
849 1154
850 pc = preempt_count(); 1155 pc = preempt_count();
851 preempt_disable_notrace(); 1156 local_irq_save(flags);
852 cpu = raw_smp_processor_id(); 1157 cpu = raw_smp_processor_id();
853 data = tr->data[cpu]; 1158 data = tr->data[cpu];
854 1159
855 if (likely(!atomic_read(&data->disabled))) 1160 if (likely(atomic_inc_return(&data->disabled) == 1))
856 ftrace_trace_special(tr, data, arg1, arg2, arg3, pc); 1161 ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);
857 1162
858 preempt_enable_notrace(); 1163 atomic_dec(&data->disabled);
1164 local_irq_restore(flags);
859} 1165}
860 1166
861#ifdef CONFIG_FUNCTION_TRACER 1167#ifdef CONFIG_FUNCTION_TRACER
862static void 1168static void
863function_trace_call(unsigned long ip, unsigned long parent_ip) 1169function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
864{ 1170{
865 struct trace_array *tr = &global_trace; 1171 struct trace_array *tr = &global_trace;
866 struct trace_array_cpu *data; 1172 struct trace_array_cpu *data;
@@ -873,8 +1179,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
873 return; 1179 return;
874 1180
875 pc = preempt_count(); 1181 pc = preempt_count();
876 resched = need_resched(); 1182 resched = ftrace_preempt_disable();
877 preempt_disable_notrace();
878 local_save_flags(flags); 1183 local_save_flags(flags);
879 cpu = raw_smp_processor_id(); 1184 cpu = raw_smp_processor_id();
880 data = tr->data[cpu]; 1185 data = tr->data[cpu];
@@ -884,12 +1189,97 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
884 trace_function(tr, data, ip, parent_ip, flags, pc); 1189 trace_function(tr, data, ip, parent_ip, flags, pc);
885 1190
886 atomic_dec(&data->disabled); 1191 atomic_dec(&data->disabled);
887 if (resched) 1192 ftrace_preempt_enable(resched);
888 preempt_enable_no_resched_notrace();
889 else
890 preempt_enable_notrace();
891} 1193}
892 1194
1195static void
1196function_trace_call(unsigned long ip, unsigned long parent_ip)
1197{
1198 struct trace_array *tr = &global_trace;
1199 struct trace_array_cpu *data;
1200 unsigned long flags;
1201 long disabled;
1202 int cpu;
1203 int pc;
1204
1205 if (unlikely(!ftrace_function_enabled))
1206 return;
1207
1208 /*
1209 * Need to use raw, since this must be called before the
1210 * recursive protection is performed.
1211 */
1212 local_irq_save(flags);
1213 cpu = raw_smp_processor_id();
1214 data = tr->data[cpu];
1215 disabled = atomic_inc_return(&data->disabled);
1216
1217 if (likely(disabled == 1)) {
1218 pc = preempt_count();
1219 trace_function(tr, data, ip, parent_ip, flags, pc);
1220 }
1221
1222 atomic_dec(&data->disabled);
1223 local_irq_restore(flags);
1224}
1225
1226#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1227int trace_graph_entry(struct ftrace_graph_ent *trace)
1228{
1229 struct trace_array *tr = &global_trace;
1230 struct trace_array_cpu *data;
1231 unsigned long flags;
1232 long disabled;
1233 int cpu;
1234 int pc;
1235
1236 if (!ftrace_trace_task(current))
1237 return 0;
1238
1239 if (!ftrace_graph_addr(trace->func))
1240 return 0;
1241
1242 local_irq_save(flags);
1243 cpu = raw_smp_processor_id();
1244 data = tr->data[cpu];
1245 disabled = atomic_inc_return(&data->disabled);
1246 if (likely(disabled == 1)) {
1247 pc = preempt_count();
1248 __trace_graph_entry(tr, data, trace, flags, pc);
1249 }
1250 /* Only do the atomic if it is not already set */
1251 if (!test_tsk_trace_graph(current))
1252 set_tsk_trace_graph(current);
1253 atomic_dec(&data->disabled);
1254 local_irq_restore(flags);
1255
1256 return 1;
1257}
1258
1259void trace_graph_return(struct ftrace_graph_ret *trace)
1260{
1261 struct trace_array *tr = &global_trace;
1262 struct trace_array_cpu *data;
1263 unsigned long flags;
1264 long disabled;
1265 int cpu;
1266 int pc;
1267
1268 local_irq_save(flags);
1269 cpu = raw_smp_processor_id();
1270 data = tr->data[cpu];
1271 disabled = atomic_inc_return(&data->disabled);
1272 if (likely(disabled == 1)) {
1273 pc = preempt_count();
1274 __trace_graph_return(tr, data, trace, flags, pc);
1275 }
1276 if (!trace->depth)
1277 clear_tsk_trace_graph(current);
1278 atomic_dec(&data->disabled);
1279 local_irq_restore(flags);
1280}
1281#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1282
893static struct ftrace_ops trace_ops __read_mostly = 1283static struct ftrace_ops trace_ops __read_mostly =
894{ 1284{
895 .func = function_trace_call, 1285 .func = function_trace_call,
@@ -898,9 +1288,14 @@ static struct ftrace_ops trace_ops __read_mostly =
898void tracing_start_function_trace(void) 1288void tracing_start_function_trace(void)
899{ 1289{
900 ftrace_function_enabled = 0; 1290 ftrace_function_enabled = 0;
1291
1292 if (trace_flags & TRACE_ITER_PREEMPTONLY)
1293 trace_ops.func = function_trace_call_preempt_only;
1294 else
1295 trace_ops.func = function_trace_call;
1296
901 register_ftrace_function(&trace_ops); 1297 register_ftrace_function(&trace_ops);
902 if (tracer_enabled) 1298 ftrace_function_enabled = 1;
903 ftrace_function_enabled = 1;
904} 1299}
905 1300
906void tracing_stop_function_trace(void) 1301void tracing_stop_function_trace(void)
@@ -912,6 +1307,7 @@ void tracing_stop_function_trace(void)
912 1307
913enum trace_file_type { 1308enum trace_file_type {
914 TRACE_FILE_LAT_FMT = 1, 1309 TRACE_FILE_LAT_FMT = 1,
1310 TRACE_FILE_ANNOTATE = 2,
915}; 1311};
916 1312
917static void trace_iterator_increment(struct trace_iterator *iter, int cpu) 1313static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
@@ -1047,10 +1443,6 @@ static void *s_start(struct seq_file *m, loff_t *pos)
1047 1443
1048 atomic_inc(&trace_record_cmdline_disabled); 1444 atomic_inc(&trace_record_cmdline_disabled);
1049 1445
1050 /* let the tracer grab locks here if needed */
1051 if (current_trace->start)
1052 current_trace->start(iter);
1053
1054 if (*pos != iter->pos) { 1446 if (*pos != iter->pos) {
1055 iter->ent = NULL; 1447 iter->ent = NULL;
1056 iter->cpu = 0; 1448 iter->cpu = 0;
@@ -1077,14 +1469,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
1077 1469
1078static void s_stop(struct seq_file *m, void *p) 1470static void s_stop(struct seq_file *m, void *p)
1079{ 1471{
1080 struct trace_iterator *iter = m->private;
1081
1082 atomic_dec(&trace_record_cmdline_disabled); 1472 atomic_dec(&trace_record_cmdline_disabled);
1083
1084 /* let the tracer release locks here if needed */
1085 if (current_trace && current_trace == iter->trace && iter->trace->stop)
1086 iter->trace->stop(iter);
1087
1088 mutex_unlock(&trace_types_lock); 1473 mutex_unlock(&trace_types_lock);
1089} 1474}
1090 1475
@@ -1143,7 +1528,7 @@ seq_print_sym_offset(struct trace_seq *s, const char *fmt,
1143# define IP_FMT "%016lx" 1528# define IP_FMT "%016lx"
1144#endif 1529#endif
1145 1530
1146static int 1531int
1147seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) 1532seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
1148{ 1533{
1149 int ret; 1534 int ret;
@@ -1164,6 +1549,78 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
1164 return ret; 1549 return ret;
1165} 1550}
1166 1551
1552static inline int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
1553 unsigned long ip, unsigned long sym_flags)
1554{
1555 struct file *file = NULL;
1556 unsigned long vmstart = 0;
1557 int ret = 1;
1558
1559 if (mm) {
1560 const struct vm_area_struct *vma;
1561
1562 down_read(&mm->mmap_sem);
1563 vma = find_vma(mm, ip);
1564 if (vma) {
1565 file = vma->vm_file;
1566 vmstart = vma->vm_start;
1567 }
1568 if (file) {
1569 ret = trace_seq_path(s, &file->f_path);
1570 if (ret)
1571 ret = trace_seq_printf(s, "[+0x%lx]", ip - vmstart);
1572 }
1573 up_read(&mm->mmap_sem);
1574 }
1575 if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
1576 ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
1577 return ret;
1578}
1579
1580static int
1581seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
1582 unsigned long sym_flags)
1583{
1584 struct mm_struct *mm = NULL;
1585 int ret = 1;
1586 unsigned int i;
1587
1588 if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
1589 struct task_struct *task;
1590 /*
1591 * we do the lookup on the thread group leader,
1592 * since individual threads might have already quit!
1593 */
1594 rcu_read_lock();
1595 task = find_task_by_vpid(entry->ent.tgid);
1596 if (task)
1597 mm = get_task_mm(task);
1598 rcu_read_unlock();
1599 }
1600
1601 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
1602 unsigned long ip = entry->caller[i];
1603
1604 if (ip == ULONG_MAX || !ret)
1605 break;
1606 if (i && ret)
1607 ret = trace_seq_puts(s, " <- ");
1608 if (!ip) {
1609 if (ret)
1610 ret = trace_seq_puts(s, "??");
1611 continue;
1612 }
1613 if (!ret)
1614 break;
1615 if (ret)
1616 ret = seq_print_user_ip(s, mm, ip, sym_flags);
1617 }
1618
1619 if (mm)
1620 mmput(mm);
1621 return ret;
1622}
1623
1167static void print_lat_help_header(struct seq_file *m) 1624static void print_lat_help_header(struct seq_file *m)
1168{ 1625{
1169 seq_puts(m, "# _------=> CPU# \n"); 1626 seq_puts(m, "# _------=> CPU# \n");
@@ -1301,6 +1758,13 @@ lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
1301 1758
1302static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; 1759static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
1303 1760
1761static int task_state_char(unsigned long state)
1762{
1763 int bit = state ? __ffs(state) + 1 : 0;
1764
1765 return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
1766}
1767
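A brief worked example of the new helper, assuming the TASK_STATE_TO_CHAR_STR of this era is "RSDTtZX":

	/*
	 * task_state_char(0)                  == 'R'  (running)
	 * task_state_char(TASK_INTERRUPTIBLE) == 'S'  (bit 0 maps to index 1)
	 * task_state_char(1UL << 16)          == '?'  (index past the string)
	 */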
1304/* 1768/*
1305 * The message is supposed to contain an ending newline. 1769 * The message is supposed to contain an ending newline.
1306 * If the printing stops prematurely, try to add a newline of our own. 1770 * If the printing stops prematurely, try to add a newline of our own.
@@ -1338,6 +1802,23 @@ void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter)
1338 trace_seq_putc(s, '\n'); 1802 trace_seq_putc(s, '\n');
1339} 1803}
1340 1804
1805static void test_cpu_buff_start(struct trace_iterator *iter)
1806{
1807 struct trace_seq *s = &iter->seq;
1808
1809 if (!(trace_flags & TRACE_ITER_ANNOTATE))
1810 return;
1811
1812 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
1813 return;
1814
1815 if (cpu_isset(iter->cpu, iter->started))
1816 return;
1817
1818 cpu_set(iter->cpu, iter->started);
1819 trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
1820}
1821
1341static enum print_line_t 1822static enum print_line_t
1342print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) 1823print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
1343{ 1824{
@@ -1352,11 +1833,12 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
1352 char *comm; 1833 char *comm;
1353 int S, T; 1834 int S, T;
1354 int i; 1835 int i;
1355 unsigned state;
1356 1836
1357 if (entry->type == TRACE_CONT) 1837 if (entry->type == TRACE_CONT)
1358 return TRACE_TYPE_HANDLED; 1838 return TRACE_TYPE_HANDLED;
1359 1839
1840 test_cpu_buff_start(iter);
1841
1360 next_entry = find_next_entry(iter, NULL, &next_ts); 1842 next_entry = find_next_entry(iter, NULL, &next_ts);
1361 if (!next_entry) 1843 if (!next_entry)
1362 next_ts = iter->ts; 1844 next_ts = iter->ts;
@@ -1396,12 +1878,8 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
1396 1878
1397 trace_assign_type(field, entry); 1879 trace_assign_type(field, entry);
1398 1880
1399 T = field->next_state < sizeof(state_to_char) ? 1881 T = task_state_char(field->next_state);
1400 state_to_char[field->next_state] : 'X'; 1882 S = task_state_char(field->prev_state);
1401
1402 state = field->prev_state ?
1403 __ffs(field->prev_state) + 1 : 0;
1404 S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X';
1405 comm = trace_find_cmdline(field->next_pid); 1883 comm = trace_find_cmdline(field->next_pid);
1406 trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n", 1884 trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
1407 field->prev_pid, 1885 field->prev_pid,
@@ -1448,6 +1926,27 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
1448 trace_seq_print_cont(s, iter); 1926 trace_seq_print_cont(s, iter);
1449 break; 1927 break;
1450 } 1928 }
1929 case TRACE_BRANCH: {
1930 struct trace_branch *field;
1931
1932 trace_assign_type(field, entry);
1933
1934 trace_seq_printf(s, "[%s] %s:%s:%d\n",
1935 field->correct ? " ok " : " MISS ",
1936 field->func,
1937 field->file,
1938 field->line);
1939 break;
1940 }
1941 case TRACE_USER_STACK: {
1942 struct userstack_entry *field;
1943
1944 trace_assign_type(field, entry);
1945
1946 seq_print_userip_objs(field, s, sym_flags);
1947 trace_seq_putc(s, '\n');
1948 break;
1949 }
1451 default: 1950 default:
1452 trace_seq_printf(s, "Unknown type %d\n", entry->type); 1951 trace_seq_printf(s, "Unknown type %d\n", entry->type);
1453 } 1952 }
@@ -1472,6 +1971,8 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
1472 if (entry->type == TRACE_CONT) 1971 if (entry->type == TRACE_CONT)
1473 return TRACE_TYPE_HANDLED; 1972 return TRACE_TYPE_HANDLED;
1474 1973
1974 test_cpu_buff_start(iter);
1975
1475 comm = trace_find_cmdline(iter->ent->pid); 1976 comm = trace_find_cmdline(iter->ent->pid);
1476 1977
1477 t = ns2usecs(iter->ts); 1978 t = ns2usecs(iter->ts);
@@ -1519,10 +2020,8 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
1519 2020
1520 trace_assign_type(field, entry); 2021 trace_assign_type(field, entry);
1521 2022
1522 S = field->prev_state < sizeof(state_to_char) ? 2023 T = task_state_char(field->next_state);
1523 state_to_char[field->prev_state] : 'X'; 2024 S = task_state_char(field->prev_state);
1524 T = field->next_state < sizeof(state_to_char) ?
1525 state_to_char[field->next_state] : 'X';
1526 ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n", 2025 ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n",
1527 field->prev_pid, 2026 field->prev_pid,
1528 field->prev_prio, 2027 field->prev_prio,
@@ -1581,6 +2080,37 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
1581 trace_seq_print_cont(s, iter); 2080 trace_seq_print_cont(s, iter);
1582 break; 2081 break;
1583 } 2082 }
2083 case TRACE_GRAPH_RET: {
2084 return print_graph_function(iter);
2085 }
2086 case TRACE_GRAPH_ENT: {
2087 return print_graph_function(iter);
2088 }
2089 case TRACE_BRANCH: {
2090 struct trace_branch *field;
2091
2092 trace_assign_type(field, entry);
2093
2094 trace_seq_printf(s, "[%s] %s:%s:%d\n",
2095 field->correct ? " ok " : " MISS ",
2096 field->func,
2097 field->file,
2098 field->line);
2099 break;
2100 }
2101 case TRACE_USER_STACK: {
2102 struct userstack_entry *field;
2103
2104 trace_assign_type(field, entry);
2105
2106 ret = seq_print_userip_objs(field, s, sym_flags);
2107 if (!ret)
2108 return TRACE_TYPE_PARTIAL_LINE;
2109 ret = trace_seq_putc(s, '\n');
2110 if (!ret)
2111 return TRACE_TYPE_PARTIAL_LINE;
2112 break;
2113 }
1584 } 2114 }
1585 return TRACE_TYPE_HANDLED; 2115 return TRACE_TYPE_HANDLED;
1586} 2116}
@@ -1621,12 +2151,9 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
1621 2151
1622 trace_assign_type(field, entry); 2152 trace_assign_type(field, entry);
1623 2153
1624 S = field->prev_state < sizeof(state_to_char) ? 2154 T = task_state_char(field->next_state);
1625 state_to_char[field->prev_state] : 'X'; 2155 S = entry->type == TRACE_WAKE ? '+' :
1626 T = field->next_state < sizeof(state_to_char) ? 2156 task_state_char(field->prev_state);
1627 state_to_char[field->next_state] : 'X';
1628 if (entry->type == TRACE_WAKE)
1629 S = '+';
1630 ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n", 2157 ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n",
1631 field->prev_pid, 2158 field->prev_pid,
1632 field->prev_prio, 2159 field->prev_prio,
@@ -1640,6 +2167,7 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
1640 break; 2167 break;
1641 } 2168 }
1642 case TRACE_SPECIAL: 2169 case TRACE_SPECIAL:
2170 case TRACE_USER_STACK:
1643 case TRACE_STACK: { 2171 case TRACE_STACK: {
1644 struct special_entry *field; 2172 struct special_entry *field;
1645 2173
@@ -1712,12 +2240,9 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
1712 2240
1713 trace_assign_type(field, entry); 2241 trace_assign_type(field, entry);
1714 2242
1715 S = field->prev_state < sizeof(state_to_char) ? 2243 T = task_state_char(field->next_state);
1716 state_to_char[field->prev_state] : 'X'; 2244 S = entry->type == TRACE_WAKE ? '+' :
1717 T = field->next_state < sizeof(state_to_char) ? 2245 task_state_char(field->prev_state);
1718 state_to_char[field->next_state] : 'X';
1719 if (entry->type == TRACE_WAKE)
1720 S = '+';
1721 SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid); 2246 SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
1722 SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio); 2247 SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
1723 SEQ_PUT_HEX_FIELD_RET(s, S); 2248 SEQ_PUT_HEX_FIELD_RET(s, S);
@@ -1728,6 +2253,7 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
1728 break; 2253 break;
1729 } 2254 }
1730 case TRACE_SPECIAL: 2255 case TRACE_SPECIAL:
2256 case TRACE_USER_STACK:
1731 case TRACE_STACK: { 2257 case TRACE_STACK: {
1732 struct special_entry *field; 2258 struct special_entry *field;
1733 2259
@@ -1744,6 +2270,25 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
1744 return TRACE_TYPE_HANDLED; 2270 return TRACE_TYPE_HANDLED;
1745} 2271}
1746 2272
2273static enum print_line_t print_printk_msg_only(struct trace_iterator *iter)
2274{
2275 struct trace_seq *s = &iter->seq;
2276 struct trace_entry *entry = iter->ent;
2277 struct print_entry *field;
2278 int ret;
2279
2280 trace_assign_type(field, entry);
2281
 2282 ret = trace_seq_printf(s, "%s", field->buf);
2283 if (!ret)
2284 return TRACE_TYPE_PARTIAL_LINE;
2285
2286 if (entry->flags & TRACE_FLAG_CONT)
2287 trace_seq_print_cont(s, iter);
2288
2289 return TRACE_TYPE_HANDLED;
2290}
2291
1747static enum print_line_t print_bin_fmt(struct trace_iterator *iter) 2292static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
1748{ 2293{
1749 struct trace_seq *s = &iter->seq; 2294 struct trace_seq *s = &iter->seq;
@@ -1782,6 +2327,7 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
1782 break; 2327 break;
1783 } 2328 }
1784 case TRACE_SPECIAL: 2329 case TRACE_SPECIAL:
2330 case TRACE_USER_STACK:
1785 case TRACE_STACK: { 2331 case TRACE_STACK: {
1786 struct special_entry *field; 2332 struct special_entry *field;
1787 2333
@@ -1823,6 +2369,11 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
1823 return ret; 2369 return ret;
1824 } 2370 }
1825 2371
2372 if (iter->ent->type == TRACE_PRINT &&
2373 trace_flags & TRACE_ITER_PRINTK &&
2374 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2375 return print_printk_msg_only(iter);
2376
1826 if (trace_flags & TRACE_ITER_BIN) 2377 if (trace_flags & TRACE_ITER_BIN)
1827 return print_bin_fmt(iter); 2378 return print_bin_fmt(iter);
1828 2379
@@ -1847,7 +2398,9 @@ static int s_show(struct seq_file *m, void *v)
1847 seq_printf(m, "# tracer: %s\n", iter->trace->name); 2398 seq_printf(m, "# tracer: %s\n", iter->trace->name);
1848 seq_puts(m, "#\n"); 2399 seq_puts(m, "#\n");
1849 } 2400 }
1850 if (iter->iter_flags & TRACE_FILE_LAT_FMT) { 2401 if (iter->trace && iter->trace->print_header)
2402 iter->trace->print_header(m);
2403 else if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
1851 /* print nothing if the buffers are empty */ 2404 /* print nothing if the buffers are empty */
1852 if (trace_empty(iter)) 2405 if (trace_empty(iter))
1853 return 0; 2406 return 0;
@@ -1899,6 +2452,15 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
1899 iter->trace = current_trace; 2452 iter->trace = current_trace;
1900 iter->pos = -1; 2453 iter->pos = -1;
1901 2454
2455 /* Notify the tracer early; before we stop tracing. */
2456 if (iter->trace && iter->trace->open)
2457 iter->trace->open(iter);
2458
2459 /* Annotate start of buffers if we had overruns */
2460 if (ring_buffer_overruns(iter->tr->buffer))
2461 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2462
2463
1902 for_each_tracing_cpu(cpu) { 2464 for_each_tracing_cpu(cpu) {
1903 2465
1904 iter->buffer_iter[cpu] = 2466 iter->buffer_iter[cpu] =
@@ -1917,13 +2479,7 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
1917 m->private = iter; 2479 m->private = iter;
1918 2480
1919 /* stop the trace while dumping */ 2481 /* stop the trace while dumping */
1920 if (iter->tr->ctrl) { 2482 tracing_stop();
1921 tracer_enabled = 0;
1922 ftrace_function_enabled = 0;
1923 }
1924
1925 if (iter->trace && iter->trace->open)
1926 iter->trace->open(iter);
1927 2483
1928 mutex_unlock(&trace_types_lock); 2484 mutex_unlock(&trace_types_lock);
1929 2485
@@ -1966,14 +2522,7 @@ int tracing_release(struct inode *inode, struct file *file)
1966 iter->trace->close(iter); 2522 iter->trace->close(iter);
1967 2523
1968 /* reenable tracing if it was previously enabled */ 2524 /* reenable tracing if it was previously enabled */
1969 if (iter->tr->ctrl) { 2525 tracing_start();
1970 tracer_enabled = 1;
1971 /*
1972 * It is safe to enable function tracing even if it
1973 * isn't used
1974 */
1975 ftrace_function_enabled = 1;
1976 }
1977 mutex_unlock(&trace_types_lock); 2526 mutex_unlock(&trace_types_lock);
1978 2527
1979 seq_release(inode, file); 2528 seq_release(inode, file);
@@ -2151,7 +2700,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2151 if (err) 2700 if (err)
2152 goto err_unlock; 2701 goto err_unlock;
2153 2702
2154 raw_local_irq_disable(); 2703 local_irq_disable();
2155 __raw_spin_lock(&ftrace_max_lock); 2704 __raw_spin_lock(&ftrace_max_lock);
2156 for_each_tracing_cpu(cpu) { 2705 for_each_tracing_cpu(cpu) {
2157 /* 2706 /*
@@ -2168,7 +2717,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2168 } 2717 }
2169 } 2718 }
2170 __raw_spin_unlock(&ftrace_max_lock); 2719 __raw_spin_unlock(&ftrace_max_lock);
2171 raw_local_irq_enable(); 2720 local_irq_enable();
2172 2721
2173 tracing_cpumask = tracing_cpumask_new; 2722 tracing_cpumask = tracing_cpumask_new;
2174 2723
@@ -2189,13 +2738,16 @@ static struct file_operations tracing_cpumask_fops = {
2189}; 2738};
2190 2739
2191static ssize_t 2740static ssize_t
2192tracing_iter_ctrl_read(struct file *filp, char __user *ubuf, 2741tracing_trace_options_read(struct file *filp, char __user *ubuf,
2193 size_t cnt, loff_t *ppos) 2742 size_t cnt, loff_t *ppos)
2194{ 2743{
2744 int i;
2195 char *buf; 2745 char *buf;
2196 int r = 0; 2746 int r = 0;
2197 int len = 0; 2747 int len = 0;
2198 int i; 2748 u32 tracer_flags = current_trace->flags->val;
2749 struct tracer_opt *trace_opts = current_trace->flags->opts;
2750
2199 2751
2200 /* calulate max size */ 2752 /* calulate max size */
2201 for (i = 0; trace_options[i]; i++) { 2753 for (i = 0; trace_options[i]; i++) {
@@ -2203,6 +2755,15 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
2203 len += 3; /* "no" and space */ 2755 len += 3; /* "no" and space */
2204 } 2756 }
2205 2757
2758 /*
 2759 * Increase the size with the names of the options
 2760 * specific to the current tracer.
2761 */
2762 for (i = 0; trace_opts[i].name; i++) {
2763 len += strlen(trace_opts[i].name);
2764 len += 3; /* "no" and space */
2765 }
2766
2206 /* +2 for \n and \0 */ 2767 /* +2 for \n and \0 */
2207 buf = kmalloc(len + 2, GFP_KERNEL); 2768 buf = kmalloc(len + 2, GFP_KERNEL);
2208 if (!buf) 2769 if (!buf)
@@ -2215,6 +2776,15 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
2215 r += sprintf(buf + r, "no%s ", trace_options[i]); 2776 r += sprintf(buf + r, "no%s ", trace_options[i]);
2216 } 2777 }
2217 2778
2779 for (i = 0; trace_opts[i].name; i++) {
2780 if (tracer_flags & trace_opts[i].bit)
2781 r += sprintf(buf + r, "%s ",
2782 trace_opts[i].name);
2783 else
2784 r += sprintf(buf + r, "no%s ",
2785 trace_opts[i].name);
2786 }
2787
2218 r += sprintf(buf + r, "\n"); 2788 r += sprintf(buf + r, "\n");
2219 WARN_ON(r >= len + 2); 2789 WARN_ON(r >= len + 2);
2220 2790
@@ -2225,13 +2795,48 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
2225 return r; 2795 return r;
2226} 2796}
2227 2797
 2798/* Try to assign a tracer-specific option */
2799static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
2800{
2801 struct tracer_flags *trace_flags = trace->flags;
2802 struct tracer_opt *opts = NULL;
2803 int ret = 0, i = 0;
2804 int len;
2805
2806 for (i = 0; trace_flags->opts[i].name; i++) {
2807 opts = &trace_flags->opts[i];
2808 len = strlen(opts->name);
2809
2810 if (strncmp(cmp, opts->name, len) == 0) {
2811 ret = trace->set_flag(trace_flags->val,
2812 opts->bit, !neg);
2813 break;
2814 }
2815 }
2816 /* Not found */
2817 if (!trace_flags->opts[i].name)
2818 return -EINVAL;
2819
2820 /* Refused to handle */
2821 if (ret)
2822 return ret;
2823
2824 if (neg)
2825 trace_flags->val &= ~opts->bit;
2826 else
2827 trace_flags->val |= opts->bit;
2828
2829 return 0;
2830}
2831
2228static ssize_t 2832static ssize_t
2229tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf, 2833tracing_trace_options_write(struct file *filp, const char __user *ubuf,
2230 size_t cnt, loff_t *ppos) 2834 size_t cnt, loff_t *ppos)
2231{ 2835{
2232 char buf[64]; 2836 char buf[64];
2233 char *cmp = buf; 2837 char *cmp = buf;
2234 int neg = 0; 2838 int neg = 0;
2839 int ret;
2235 int i; 2840 int i;
2236 2841
2237 if (cnt >= sizeof(buf)) 2842 if (cnt >= sizeof(buf))
@@ -2258,11 +2863,13 @@ tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
2258 break; 2863 break;
2259 } 2864 }
2260 } 2865 }
2261 /* 2866
2262 * If no option could be set, return an error: 2867 /* If no option could be set, test the specific tracer options */
2263 */ 2868 if (!trace_options[i]) {
2264 if (!trace_options[i]) 2869 ret = set_tracer_option(current_trace, cmp, neg);
2265 return -EINVAL; 2870 if (ret)
2871 return ret;
2872 }
2266 2873
2267 filp->f_pos += cnt; 2874 filp->f_pos += cnt;
2268 2875
@@ -2271,8 +2878,8 @@ tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
2271 2878
2272static struct file_operations tracing_iter_fops = { 2879static struct file_operations tracing_iter_fops = {
2273 .open = tracing_open_generic, 2880 .open = tracing_open_generic,
2274 .read = tracing_iter_ctrl_read, 2881 .read = tracing_trace_options_read,
2275 .write = tracing_iter_ctrl_write, 2882 .write = tracing_trace_options_write,
2276}; 2883};
2277 2884
2278static const char readme_msg[] = 2885static const char readme_msg[] =
@@ -2286,9 +2893,9 @@ static const char readme_msg[] =
2286 "# echo sched_switch > /debug/tracing/current_tracer\n" 2893 "# echo sched_switch > /debug/tracing/current_tracer\n"
2287 "# cat /debug/tracing/current_tracer\n" 2894 "# cat /debug/tracing/current_tracer\n"
2288 "sched_switch\n" 2895 "sched_switch\n"
2289 "# cat /debug/tracing/iter_ctrl\n" 2896 "# cat /debug/tracing/trace_options\n"
2290 "noprint-parent nosym-offset nosym-addr noverbose\n" 2897 "noprint-parent nosym-offset nosym-addr noverbose\n"
2291 "# echo print-parent > /debug/tracing/iter_ctrl\n" 2898 "# echo print-parent > /debug/tracing/trace_options\n"
2292 "# echo 1 > /debug/tracing/tracing_enabled\n" 2899 "# echo 1 > /debug/tracing/tracing_enabled\n"
2293 "# cat /debug/tracing/trace > /tmp/trace.txt\n" 2900 "# cat /debug/tracing/trace > /tmp/trace.txt\n"
2294 "echo 0 > /debug/tracing/tracing_enabled\n" 2901 "echo 0 > /debug/tracing/tracing_enabled\n"
@@ -2311,11 +2918,10 @@ static ssize_t
2311tracing_ctrl_read(struct file *filp, char __user *ubuf, 2918tracing_ctrl_read(struct file *filp, char __user *ubuf,
2312 size_t cnt, loff_t *ppos) 2919 size_t cnt, loff_t *ppos)
2313{ 2920{
2314 struct trace_array *tr = filp->private_data;
2315 char buf[64]; 2921 char buf[64];
2316 int r; 2922 int r;
2317 2923
2318 r = sprintf(buf, "%ld\n", tr->ctrl); 2924 r = sprintf(buf, "%u\n", tracer_enabled);
2319 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 2925 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2320} 2926}
2321 2927
@@ -2343,16 +2949,18 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf,
2343 val = !!val; 2949 val = !!val;
2344 2950
2345 mutex_lock(&trace_types_lock); 2951 mutex_lock(&trace_types_lock);
2346 if (tr->ctrl ^ val) { 2952 if (tracer_enabled ^ val) {
2347 if (val) 2953 if (val) {
2348 tracer_enabled = 1; 2954 tracer_enabled = 1;
2349 else 2955 if (current_trace->start)
2956 current_trace->start(tr);
2957 tracing_start();
2958 } else {
2350 tracer_enabled = 0; 2959 tracer_enabled = 0;
2351 2960 tracing_stop();
2352 tr->ctrl = val; 2961 if (current_trace->stop)
2353 2962 current_trace->stop(tr);
2354 if (current_trace && current_trace->ctrl_update) 2963 }
2355 current_trace->ctrl_update(tr);
2356 } 2964 }
2357 mutex_unlock(&trace_types_lock); 2965 mutex_unlock(&trace_types_lock);
2358 2966
@@ -2378,29 +2986,11 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf,
2378 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 2986 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2379} 2987}
2380 2988
2381static ssize_t 2989static int tracing_set_tracer(char *buf)
2382tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2383 size_t cnt, loff_t *ppos)
2384{ 2990{
2385 struct trace_array *tr = &global_trace; 2991 struct trace_array *tr = &global_trace;
2386 struct tracer *t; 2992 struct tracer *t;
2387 char buf[max_tracer_type_len+1]; 2993 int ret = 0;
2388 int i;
2389 size_t ret;
2390
2391 ret = cnt;
2392
2393 if (cnt > max_tracer_type_len)
2394 cnt = max_tracer_type_len;
2395
2396 if (copy_from_user(&buf, ubuf, cnt))
2397 return -EFAULT;
2398
2399 buf[cnt] = 0;
2400
2401 /* strip ending whitespace. */
2402 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
2403 buf[i] = 0;
2404 2994
2405 mutex_lock(&trace_types_lock); 2995 mutex_lock(&trace_types_lock);
2406 for (t = trace_types; t; t = t->next) { 2996 for (t = trace_types; t; t = t->next) {
@@ -2414,18 +3004,52 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2414 if (t == current_trace) 3004 if (t == current_trace)
2415 goto out; 3005 goto out;
2416 3006
3007 trace_branch_disable();
2417 if (current_trace && current_trace->reset) 3008 if (current_trace && current_trace->reset)
2418 current_trace->reset(tr); 3009 current_trace->reset(tr);
2419 3010
2420 current_trace = t; 3011 current_trace = t;
2421 if (t->init) 3012 if (t->init) {
2422 t->init(tr); 3013 ret = t->init(tr);
3014 if (ret)
3015 goto out;
3016 }
2423 3017
3018 trace_branch_enable(tr);
2424 out: 3019 out:
2425 mutex_unlock(&trace_types_lock); 3020 mutex_unlock(&trace_types_lock);
2426 3021
2427 if (ret > 0) 3022 return ret;
2428 filp->f_pos += ret; 3023}
3024
3025static ssize_t
3026tracing_set_trace_write(struct file *filp, const char __user *ubuf,
3027 size_t cnt, loff_t *ppos)
3028{
3029 char buf[max_tracer_type_len+1];
3030 int i;
3031 size_t ret;
3032 int err;
3033
3034 ret = cnt;
3035
3036 if (cnt > max_tracer_type_len)
3037 cnt = max_tracer_type_len;
3038
3039 if (copy_from_user(&buf, ubuf, cnt))
3040 return -EFAULT;
3041
3042 buf[cnt] = 0;
3043
3044 /* strip ending whitespace. */
3045 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
3046 buf[i] = 0;
3047
3048 err = tracing_set_tracer(buf);
3049 if (err)
3050 return err;
3051
3052 filp->f_pos += ret;
2429 3053
2430 return ret; 3054 return ret;
2431} 3055}
@@ -2492,6 +3116,10 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
2492 return -ENOMEM; 3116 return -ENOMEM;
2493 3117
2494 mutex_lock(&trace_types_lock); 3118 mutex_lock(&trace_types_lock);
3119
3120 /* trace pipe does not show start of buffer */
3121 cpus_setall(iter->started);
3122
2495 iter->tr = &global_trace; 3123 iter->tr = &global_trace;
2496 iter->trace = current_trace; 3124 iter->trace = current_trace;
2497 filp->private_data = iter; 3125 filp->private_data = iter;
@@ -2667,7 +3295,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
2667 char buf[64]; 3295 char buf[64];
2668 int r; 3296 int r;
2669 3297
2670 r = sprintf(buf, "%lu\n", tr->entries); 3298 r = sprintf(buf, "%lu\n", tr->entries >> 10);
2671 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 3299 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2672} 3300}
2673 3301
@@ -2678,7 +3306,6 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2678 unsigned long val; 3306 unsigned long val;
2679 char buf[64]; 3307 char buf[64];
2680 int ret, cpu; 3308 int ret, cpu;
2681 struct trace_array *tr = filp->private_data;
2682 3309
2683 if (cnt >= sizeof(buf)) 3310 if (cnt >= sizeof(buf))
2684 return -EINVAL; 3311 return -EINVAL;
@@ -2698,12 +3325,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2698 3325
2699 mutex_lock(&trace_types_lock); 3326 mutex_lock(&trace_types_lock);
2700 3327
2701 if (tr->ctrl) { 3328 tracing_stop();
2702 cnt = -EBUSY;
2703 pr_info("ftrace: please disable tracing"
2704 " before modifying buffer size\n");
2705 goto out;
2706 }
2707 3329
2708 /* disable all cpu buffers */ 3330 /* disable all cpu buffers */
2709 for_each_tracing_cpu(cpu) { 3331 for_each_tracing_cpu(cpu) {
@@ -2713,6 +3335,9 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2713 atomic_inc(&max_tr.data[cpu]->disabled); 3335 atomic_inc(&max_tr.data[cpu]->disabled);
2714 } 3336 }
2715 3337
3338 /* value is in KB */
3339 val <<= 10;
3340
2716 if (val != global_trace.entries) { 3341 if (val != global_trace.entries) {
2717 ret = ring_buffer_resize(global_trace.buffer, val); 3342 ret = ring_buffer_resize(global_trace.buffer, val);
2718 if (ret < 0) { 3343 if (ret < 0) {
@@ -2751,6 +3376,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2751 atomic_dec(&max_tr.data[cpu]->disabled); 3376 atomic_dec(&max_tr.data[cpu]->disabled);
2752 } 3377 }
2753 3378
3379 tracing_start();
2754 max_tr.entries = global_trace.entries; 3380 max_tr.entries = global_trace.entries;
2755 mutex_unlock(&trace_types_lock); 3381 mutex_unlock(&trace_types_lock);
2756 3382
@@ -2762,7 +3388,7 @@ static int mark_printk(const char *fmt, ...)
2762 int ret; 3388 int ret;
2763 va_list args; 3389 va_list args;
2764 va_start(args, fmt); 3390 va_start(args, fmt);
2765 ret = trace_vprintk(0, fmt, args); 3391 ret = trace_vprintk(0, -1, fmt, args);
2766 va_end(args); 3392 va_end(args);
2767 return ret; 3393 return ret;
2768} 3394}
@@ -2773,9 +3399,8 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
2773{ 3399{
2774 char *buf; 3400 char *buf;
2775 char *end; 3401 char *end;
2776 struct trace_array *tr = &global_trace;
2777 3402
2778 if (!tr->ctrl || tracing_disabled) 3403 if (tracing_disabled)
2779 return -EINVAL; 3404 return -EINVAL;
2780 3405
2781 if (cnt > TRACE_BUF_SIZE) 3406 if (cnt > TRACE_BUF_SIZE)
@@ -2841,22 +3466,38 @@ static struct file_operations tracing_mark_fops = {
2841 3466
2842#ifdef CONFIG_DYNAMIC_FTRACE 3467#ifdef CONFIG_DYNAMIC_FTRACE
2843 3468
3469int __weak ftrace_arch_read_dyn_info(char *buf, int size)
3470{
3471 return 0;
3472}
3473
2844static ssize_t 3474static ssize_t
2845tracing_read_long(struct file *filp, char __user *ubuf, 3475tracing_read_dyn_info(struct file *filp, char __user *ubuf,
2846 size_t cnt, loff_t *ppos) 3476 size_t cnt, loff_t *ppos)
2847{ 3477{
3478 static char ftrace_dyn_info_buffer[1024];
3479 static DEFINE_MUTEX(dyn_info_mutex);
2848 unsigned long *p = filp->private_data; 3480 unsigned long *p = filp->private_data;
2849 char buf[64]; 3481 char *buf = ftrace_dyn_info_buffer;
3482 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
2850 int r; 3483 int r;
2851 3484
2852 r = sprintf(buf, "%ld\n", *p); 3485 mutex_lock(&dyn_info_mutex);
3486 r = sprintf(buf, "%ld ", *p);
2853 3487
2854 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 3488 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
3489 buf[r++] = '\n';
3490
3491 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3492
3493 mutex_unlock(&dyn_info_mutex);
3494
3495 return r;
2855} 3496}
2856 3497
2857static struct file_operations tracing_read_long_fops = { 3498static struct file_operations tracing_dyn_info_fops = {
2858 .open = tracing_open_generic, 3499 .open = tracing_open_generic,
2859 .read = tracing_read_long, 3500 .read = tracing_read_dyn_info,
2860}; 3501};
2861#endif 3502#endif
2862 3503
@@ -2897,10 +3538,10 @@ static __init int tracer_init_debugfs(void)
2897 if (!entry) 3538 if (!entry)
2898 pr_warning("Could not create debugfs 'tracing_enabled' entry\n"); 3539 pr_warning("Could not create debugfs 'tracing_enabled' entry\n");
2899 3540
2900 entry = debugfs_create_file("iter_ctrl", 0644, d_tracer, 3541 entry = debugfs_create_file("trace_options", 0644, d_tracer,
2901 NULL, &tracing_iter_fops); 3542 NULL, &tracing_iter_fops);
2902 if (!entry) 3543 if (!entry)
2903 pr_warning("Could not create debugfs 'iter_ctrl' entry\n"); 3544 pr_warning("Could not create debugfs 'trace_options' entry\n");
2904 3545
2905 entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer, 3546 entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
2906 NULL, &tracing_cpumask_fops); 3547 NULL, &tracing_cpumask_fops);
@@ -2950,11 +3591,11 @@ static __init int tracer_init_debugfs(void)
2950 pr_warning("Could not create debugfs " 3591 pr_warning("Could not create debugfs "
2951 "'trace_pipe' entry\n"); 3592 "'trace_pipe' entry\n");
2952 3593
2953 entry = debugfs_create_file("trace_entries", 0644, d_tracer, 3594 entry = debugfs_create_file("buffer_size_kb", 0644, d_tracer,
2954 &global_trace, &tracing_entries_fops); 3595 &global_trace, &tracing_entries_fops);
2955 if (!entry) 3596 if (!entry)
2956 pr_warning("Could not create debugfs " 3597 pr_warning("Could not create debugfs "
2957 "'trace_entries' entry\n"); 3598 "'buffer_size_kb' entry\n");
2958 3599
2959 entry = debugfs_create_file("trace_marker", 0220, d_tracer, 3600 entry = debugfs_create_file("trace_marker", 0220, d_tracer,
2960 NULL, &tracing_mark_fops); 3601 NULL, &tracing_mark_fops);
@@ -2965,7 +3606,7 @@ static __init int tracer_init_debugfs(void)
2965#ifdef CONFIG_DYNAMIC_FTRACE 3606#ifdef CONFIG_DYNAMIC_FTRACE
2966 entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer, 3607 entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
2967 &ftrace_update_tot_cnt, 3608 &ftrace_update_tot_cnt,
2968 &tracing_read_long_fops); 3609 &tracing_dyn_info_fops);
2969 if (!entry) 3610 if (!entry)
2970 pr_warning("Could not create debugfs " 3611 pr_warning("Could not create debugfs "
2971 "'dyn_ftrace_total_info' entry\n"); 3612 "'dyn_ftrace_total_info' entry\n");
@@ -2976,7 +3617,7 @@ static __init int tracer_init_debugfs(void)
2976 return 0; 3617 return 0;
2977} 3618}
2978 3619
2979int trace_vprintk(unsigned long ip, const char *fmt, va_list args) 3620int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
2980{ 3621{
2981 static DEFINE_SPINLOCK(trace_buf_lock); 3622 static DEFINE_SPINLOCK(trace_buf_lock);
2982 static char trace_buf[TRACE_BUF_SIZE]; 3623 static char trace_buf[TRACE_BUF_SIZE];
@@ -2984,11 +3625,11 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2984 struct ring_buffer_event *event; 3625 struct ring_buffer_event *event;
2985 struct trace_array *tr = &global_trace; 3626 struct trace_array *tr = &global_trace;
2986 struct trace_array_cpu *data; 3627 struct trace_array_cpu *data;
2987 struct print_entry *entry;
2988 unsigned long flags, irq_flags;
2989 int cpu, len = 0, size, pc; 3628 int cpu, len = 0, size, pc;
3629 struct print_entry *entry;
3630 unsigned long irq_flags;
2990 3631
2991 if (!tr->ctrl || tracing_disabled) 3632 if (tracing_disabled || tracing_selftest_running)
2992 return 0; 3633 return 0;
2993 3634
2994 pc = preempt_count(); 3635 pc = preempt_count();
@@ -2999,7 +3640,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2999 if (unlikely(atomic_read(&data->disabled))) 3640 if (unlikely(atomic_read(&data->disabled)))
3000 goto out; 3641 goto out;
3001 3642
3002 spin_lock_irqsave(&trace_buf_lock, flags); 3643 pause_graph_tracing();
3644 spin_lock_irqsave(&trace_buf_lock, irq_flags);
3003 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); 3645 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
3004 3646
3005 len = min(len, TRACE_BUF_SIZE-1); 3647 len = min(len, TRACE_BUF_SIZE-1);
@@ -3010,17 +3652,18 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3010 if (!event) 3652 if (!event)
3011 goto out_unlock; 3653 goto out_unlock;
3012 entry = ring_buffer_event_data(event); 3654 entry = ring_buffer_event_data(event);
3013 tracing_generic_entry_update(&entry->ent, flags, pc); 3655 tracing_generic_entry_update(&entry->ent, irq_flags, pc);
3014 entry->ent.type = TRACE_PRINT; 3656 entry->ent.type = TRACE_PRINT;
3015 entry->ip = ip; 3657 entry->ip = ip;
3658 entry->depth = depth;
3016 3659
3017 memcpy(&entry->buf, trace_buf, len); 3660 memcpy(&entry->buf, trace_buf, len);
3018 entry->buf[len] = 0; 3661 entry->buf[len] = 0;
3019 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 3662 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
3020 3663
3021 out_unlock: 3664 out_unlock:
3022 spin_unlock_irqrestore(&trace_buf_lock, flags); 3665 spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
3023 3666 unpause_graph_tracing();
3024 out: 3667 out:
3025 preempt_enable_notrace(); 3668 preempt_enable_notrace();
3026 3669
@@ -3037,7 +3680,7 @@ int __ftrace_printk(unsigned long ip, const char *fmt, ...)
3037 return 0; 3680 return 0;
3038 3681
3039 va_start(ap, fmt); 3682 va_start(ap, fmt);
3040 ret = trace_vprintk(ip, fmt, ap); 3683 ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
3041 va_end(ap); 3684 va_end(ap);
3042 return ret; 3685 return ret;
3043} 3686}
@@ -3046,7 +3689,8 @@ EXPORT_SYMBOL_GPL(__ftrace_printk);
3046static int trace_panic_handler(struct notifier_block *this, 3689static int trace_panic_handler(struct notifier_block *this,
3047 unsigned long event, void *unused) 3690 unsigned long event, void *unused)
3048{ 3691{
3049 ftrace_dump(); 3692 if (ftrace_dump_on_oops)
3693 ftrace_dump();
3050 return NOTIFY_OK; 3694 return NOTIFY_OK;
3051} 3695}
3052 3696
@@ -3062,7 +3706,8 @@ static int trace_die_handler(struct notifier_block *self,
3062{ 3706{
3063 switch (val) { 3707 switch (val) {
3064 case DIE_OOPS: 3708 case DIE_OOPS:
3065 ftrace_dump(); 3709 if (ftrace_dump_on_oops)
3710 ftrace_dump();
3066 break; 3711 break;
3067 default: 3712 default:
3068 break; 3713 break;
@@ -3103,7 +3748,6 @@ trace_printk_seq(struct trace_seq *s)
3103 trace_seq_reset(s); 3748 trace_seq_reset(s);
3104} 3749}
3105 3750
3106
3107void ftrace_dump(void) 3751void ftrace_dump(void)
3108{ 3752{
3109 static DEFINE_SPINLOCK(ftrace_dump_lock); 3753 static DEFINE_SPINLOCK(ftrace_dump_lock);
@@ -3128,6 +3772,9 @@ void ftrace_dump(void)
3128 atomic_inc(&global_trace.data[cpu]->disabled); 3772 atomic_inc(&global_trace.data[cpu]->disabled);
3129 } 3773 }
3130 3774
3775 /* don't look at user memory in panic mode */
3776 trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
3777
3131 printk(KERN_TRACE "Dumping ftrace buffer:\n"); 3778 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3132 3779
3133 iter.tr = &global_trace; 3780 iter.tr = &global_trace;
@@ -3221,7 +3868,6 @@ __init static int tracer_alloc_buffers(void)
3221#endif 3868#endif
3222 3869
3223 /* All seems OK, enable tracing */ 3870 /* All seems OK, enable tracing */
3224 global_trace.ctrl = tracer_enabled;
3225 tracing_disabled = 0; 3871 tracing_disabled = 0;
3226 3872
3227 atomic_notifier_chain_register(&panic_notifier_list, 3873 atomic_notifier_chain_register(&panic_notifier_list,
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 8465ad052707..cc7a4f864036 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -8,6 +8,7 @@
8#include <linux/ring_buffer.h> 8#include <linux/ring_buffer.h>
9#include <linux/mmiotrace.h> 9#include <linux/mmiotrace.h>
10#include <linux/ftrace.h> 10#include <linux/ftrace.h>
11#include <trace/boot.h>
11 12
12enum trace_type { 13enum trace_type {
13 __TRACE_FIRST_TYPE = 0, 14 __TRACE_FIRST_TYPE = 0,
@@ -21,7 +22,14 @@ enum trace_type {
21 TRACE_SPECIAL, 22 TRACE_SPECIAL,
22 TRACE_MMIO_RW, 23 TRACE_MMIO_RW,
23 TRACE_MMIO_MAP, 24 TRACE_MMIO_MAP,
24 TRACE_BOOT, 25 TRACE_BRANCH,
26 TRACE_BOOT_CALL,
27 TRACE_BOOT_RET,
28 TRACE_GRAPH_RET,
29 TRACE_GRAPH_ENT,
30 TRACE_USER_STACK,
31 TRACE_HW_BRANCHES,
32 TRACE_POWER,
25 33
26 __TRACE_LAST_TYPE 34 __TRACE_LAST_TYPE
27}; 35};
@@ -38,6 +46,7 @@ struct trace_entry {
38 unsigned char flags; 46 unsigned char flags;
39 unsigned char preempt_count; 47 unsigned char preempt_count;
40 int pid; 48 int pid;
49 int tgid;
41}; 50};
42 51
43/* 52/*
@@ -48,6 +57,18 @@ struct ftrace_entry {
48 unsigned long ip; 57 unsigned long ip;
49 unsigned long parent_ip; 58 unsigned long parent_ip;
50}; 59};
60
61/* Function call entry */
62struct ftrace_graph_ent_entry {
63 struct trace_entry ent;
64 struct ftrace_graph_ent graph_ent;
65};
66
67/* Function return entry */
68struct ftrace_graph_ret_entry {
69 struct trace_entry ent;
70 struct ftrace_graph_ret ret;
71};
51extern struct tracer boot_tracer; 72extern struct tracer boot_tracer;
52 73
53/* 74/*
@@ -85,12 +106,18 @@ struct stack_entry {
85 unsigned long caller[FTRACE_STACK_ENTRIES]; 106 unsigned long caller[FTRACE_STACK_ENTRIES];
86}; 107};
87 108
109struct userstack_entry {
110 struct trace_entry ent;
111 unsigned long caller[FTRACE_STACK_ENTRIES];
112};
113
88/* 114/*
89 * ftrace_printk entry: 115 * ftrace_printk entry:
90 */ 116 */
91struct print_entry { 117struct print_entry {
92 struct trace_entry ent; 118 struct trace_entry ent;
93 unsigned long ip; 119 unsigned long ip;
120 int depth;
94 char buf[]; 121 char buf[];
95}; 122};
96 123
@@ -112,9 +139,35 @@ struct trace_mmiotrace_map {
112 struct mmiotrace_map map; 139 struct mmiotrace_map map;
113}; 140};
114 141
115struct trace_boot { 142struct trace_boot_call {
116 struct trace_entry ent; 143 struct trace_entry ent;
117 struct boot_trace initcall; 144 struct boot_trace_call boot_call;
145};
146
147struct trace_boot_ret {
148 struct trace_entry ent;
149 struct boot_trace_ret boot_ret;
150};
151
152#define TRACE_FUNC_SIZE 30
153#define TRACE_FILE_SIZE 20
154struct trace_branch {
155 struct trace_entry ent;
156 unsigned line;
157 char func[TRACE_FUNC_SIZE+1];
158 char file[TRACE_FILE_SIZE+1];
159 char correct;
160};
161
162struct hw_branch_entry {
163 struct trace_entry ent;
164 u64 from;
165 u64 to;
166};
167
168struct trace_power {
169 struct trace_entry ent;
170 struct power_trace state_data;
118}; 171};
119 172
120/* 173/*
@@ -172,7 +225,6 @@ struct trace_iterator;
172struct trace_array { 225struct trace_array {
173 struct ring_buffer *buffer; 226 struct ring_buffer *buffer;
174 unsigned long entries; 227 unsigned long entries;
175 long ctrl;
176 int cpu; 228 int cpu;
177 cycle_t time_start; 229 cycle_t time_start;
178 struct task_struct *waiter; 230 struct task_struct *waiter;
@@ -212,13 +264,22 @@ extern void __ftrace_bad_type(void);
212 IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \ 264 IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \
213 IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \ 265 IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
214 IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \ 266 IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \
267 IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
215 IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \ 268 IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \
216 IF_ASSIGN(var, ent, struct special_entry, 0); \ 269 IF_ASSIGN(var, ent, struct special_entry, 0); \
217 IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \ 270 IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
218 TRACE_MMIO_RW); \ 271 TRACE_MMIO_RW); \
219 IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \ 272 IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \
220 TRACE_MMIO_MAP); \ 273 TRACE_MMIO_MAP); \
221 IF_ASSIGN(var, ent, struct trace_boot, TRACE_BOOT); \ 274 IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
275 IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
276 IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
277 IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \
278 TRACE_GRAPH_ENT); \
279 IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
280 TRACE_GRAPH_RET); \
281 IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
282 IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \
222 __ftrace_bad_type(); \ 283 __ftrace_bad_type(); \
223 } while (0) 284 } while (0)
224 285
@@ -229,29 +290,56 @@ enum print_line_t {
229 TRACE_TYPE_UNHANDLED = 2 /* Relay to other output functions */ 290 TRACE_TYPE_UNHANDLED = 2 /* Relay to other output functions */
230}; 291};
231 292
293
294/*
295 * An option specific to a tracer. This is a boolean value.
296 * The bit is the bit index that sets its value on the
297 * flags value in struct tracer_flags.
298 */
299struct tracer_opt {
300 const char *name; /* Will appear on the trace_options file */
301 u32 bit; /* Mask assigned in val field in tracer_flags */
302};
303
304/*
 305 * The set of options specific to a tracer. Your tracer
 306 * has to set the initial value of the flags val.
307 */
308struct tracer_flags {
309 u32 val;
310 struct tracer_opt *opts;
311};
312
 313/* Makes it easier to define a tracer opt */
314#define TRACER_OPT(s, b) .name = #s, .bit = b
315
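For illustration (all names here are hypothetical, not part of this patch), a tracer would describe its private options like this and hand them to the core through its struct tracer:

/* hypothetical per-tracer option bit */
#define MY_OPT_VERBOSE	0x1

static struct tracer_opt my_tracer_opts[] = {
	/* expands to { .name = "verbose", .bit = MY_OPT_VERBOSE } */
	{ TRACER_OPT(verbose, MY_OPT_VERBOSE) },
	{ }	/* terminator: the trace_options code stops at a NULL name */
};

static struct tracer_flags my_tracer_flags = {
	.val  = 0,			/* initial value of the flags */
	.opts = my_tracer_opts,
};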
232/* 316/*
233 * A specific tracer, represented by methods that operate on a trace array: 317 * A specific tracer, represented by methods that operate on a trace array:
234 */ 318 */
235struct tracer { 319struct tracer {
236 const char *name; 320 const char *name;
237 void (*init)(struct trace_array *tr); 321 /* Your tracer should raise a warning if init fails */
322 int (*init)(struct trace_array *tr);
238 void (*reset)(struct trace_array *tr); 323 void (*reset)(struct trace_array *tr);
324 void (*start)(struct trace_array *tr);
325 void (*stop)(struct trace_array *tr);
239 void (*open)(struct trace_iterator *iter); 326 void (*open)(struct trace_iterator *iter);
240 void (*pipe_open)(struct trace_iterator *iter); 327 void (*pipe_open)(struct trace_iterator *iter);
241 void (*close)(struct trace_iterator *iter); 328 void (*close)(struct trace_iterator *iter);
242 void (*start)(struct trace_iterator *iter);
243 void (*stop)(struct trace_iterator *iter);
244 ssize_t (*read)(struct trace_iterator *iter, 329 ssize_t (*read)(struct trace_iterator *iter,
245 struct file *filp, char __user *ubuf, 330 struct file *filp, char __user *ubuf,
246 size_t cnt, loff_t *ppos); 331 size_t cnt, loff_t *ppos);
247 void (*ctrl_update)(struct trace_array *tr);
248#ifdef CONFIG_FTRACE_STARTUP_TEST 332#ifdef CONFIG_FTRACE_STARTUP_TEST
249 int (*selftest)(struct tracer *trace, 333 int (*selftest)(struct tracer *trace,
250 struct trace_array *tr); 334 struct trace_array *tr);
251#endif 335#endif
336 void (*print_header)(struct seq_file *m);
252 enum print_line_t (*print_line)(struct trace_iterator *iter); 337 enum print_line_t (*print_line)(struct trace_iterator *iter);
338 /* If you handled the flag setting, return 0 */
339 int (*set_flag)(u32 old_flags, u32 bit, int set);
253 struct tracer *next; 340 struct tracer *next;
254 int print_max; 341 int print_max;
342 struct tracer_flags *flags;
255}; 343};
256 344
257struct trace_seq { 345struct trace_seq {
@@ -279,10 +367,14 @@ struct trace_iterator {
279 unsigned long iter_flags; 367 unsigned long iter_flags;
280 loff_t pos; 368 loff_t pos;
281 long idx; 369 long idx;
370
371 cpumask_t started;
282}; 372};
283 373
374int tracing_is_enabled(void);
284void trace_wake_up(void); 375void trace_wake_up(void);
285void tracing_reset(struct trace_array *tr, int cpu); 376void tracing_reset(struct trace_array *tr, int cpu);
377void tracing_reset_online_cpus(struct trace_array *tr);
286int tracing_open_generic(struct inode *inode, struct file *filp); 378int tracing_open_generic(struct inode *inode, struct file *filp);
287struct dentry *tracing_init_dentry(void); 379struct dentry *tracing_init_dentry(void);
288void init_tracer_sysprof_debugfs(struct dentry *d_tracer); 380void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
@@ -321,8 +413,15 @@ void trace_function(struct trace_array *tr,
321 unsigned long parent_ip, 413 unsigned long parent_ip,
322 unsigned long flags, int pc); 414 unsigned long flags, int pc);
323 415
416void trace_graph_return(struct ftrace_graph_ret *trace);
417int trace_graph_entry(struct ftrace_graph_ent *trace);
418void trace_hw_branch(struct trace_array *tr, u64 from, u64 to);
419
324void tracing_start_cmdline_record(void); 420void tracing_start_cmdline_record(void);
325void tracing_stop_cmdline_record(void); 421void tracing_stop_cmdline_record(void);
422void tracing_sched_switch_assign_trace(struct trace_array *tr);
423void tracing_stop_sched_switch_record(void);
424void tracing_start_sched_switch_record(void);
326int register_tracer(struct tracer *type); 425int register_tracer(struct tracer *type);
327void unregister_tracer(struct tracer *type); 426void unregister_tracer(struct tracer *type);
328 427
@@ -358,6 +457,7 @@ struct tracer_switch_ops {
358 struct tracer_switch_ops *next; 457 struct tracer_switch_ops *next;
359}; 458};
360 459
460char *trace_find_cmdline(int pid);
361#endif /* CONFIG_CONTEXT_SWITCH_TRACER */ 461#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
362 462
363#ifdef CONFIG_DYNAMIC_FTRACE 463#ifdef CONFIG_DYNAMIC_FTRACE
@@ -383,19 +483,79 @@ extern int trace_selftest_startup_sched_switch(struct tracer *trace,
383 struct trace_array *tr); 483 struct trace_array *tr);
384extern int trace_selftest_startup_sysprof(struct tracer *trace, 484extern int trace_selftest_startup_sysprof(struct tracer *trace,
385 struct trace_array *tr); 485 struct trace_array *tr);
486extern int trace_selftest_startup_branch(struct tracer *trace,
487 struct trace_array *tr);
386#endif /* CONFIG_FTRACE_STARTUP_TEST */ 488#endif /* CONFIG_FTRACE_STARTUP_TEST */
387 489
388extern void *head_page(struct trace_array_cpu *data); 490extern void *head_page(struct trace_array_cpu *data);
389extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...); 491extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
390extern void trace_seq_print_cont(struct trace_seq *s, 492extern void trace_seq_print_cont(struct trace_seq *s,
391 struct trace_iterator *iter); 493 struct trace_iterator *iter);
494
495extern int
496seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
497 unsigned long sym_flags);
392extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, 498extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
393 size_t cnt); 499 size_t cnt);
394extern long ns2usecs(cycle_t nsec); 500extern long ns2usecs(cycle_t nsec);
395extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args); 501extern int
502trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args);
396 503
397extern unsigned long trace_flags; 504extern unsigned long trace_flags;
398 505
506/* Standard output formatting function used for function return traces */
507#ifdef CONFIG_FUNCTION_GRAPH_TRACER
508extern enum print_line_t print_graph_function(struct trace_iterator *iter);
509
510#ifdef CONFIG_DYNAMIC_FTRACE
511/* TODO: make this variable */
512#define FTRACE_GRAPH_MAX_FUNCS 32
513extern int ftrace_graph_count;
514extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
515
516static inline int ftrace_graph_addr(unsigned long addr)
517{
518 int i;
519
520 if (!ftrace_graph_count || test_tsk_trace_graph(current))
521 return 1;
522
523 for (i = 0; i < ftrace_graph_count; i++) {
524 if (addr == ftrace_graph_funcs[i])
525 return 1;
526 }
527
528 return 0;
529}
530#else
531static inline int ftrace_trace_addr(unsigned long addr)
532{
533 return 1;
534}
535static inline int ftrace_graph_addr(unsigned long addr)
536{
537 return 1;
538}
539#endif /* CONFIG_DYNAMIC_FTRACE */
540
541#else /* CONFIG_FUNCTION_GRAPH_TRACER */
542static inline enum print_line_t
543print_graph_function(struct trace_iterator *iter)
544{
545 return TRACE_TYPE_UNHANDLED;
546}
547#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
548
549extern struct pid *ftrace_pid_trace;
550
551static inline int ftrace_trace_task(struct task_struct *task)
552{
553 if (!ftrace_pid_trace)
554 return 1;
555
556 return test_tsk_trace_trace(task);
557}
558
399/* 559/*
400 * trace_iterator_flags is an enumeration that defines bit 560 * trace_iterator_flags is an enumeration that defines bit
401 * positions into trace_flags that controls the output. 561 * positions into trace_flags that controls the output.
@@ -415,8 +575,93 @@ enum trace_iterator_flags {
415 TRACE_ITER_STACKTRACE = 0x100, 575 TRACE_ITER_STACKTRACE = 0x100,
416 TRACE_ITER_SCHED_TREE = 0x200, 576 TRACE_ITER_SCHED_TREE = 0x200,
417 TRACE_ITER_PRINTK = 0x400, 577 TRACE_ITER_PRINTK = 0x400,
578 TRACE_ITER_PREEMPTONLY = 0x800,
579 TRACE_ITER_BRANCH = 0x1000,
580 TRACE_ITER_ANNOTATE = 0x2000,
581 TRACE_ITER_USERSTACKTRACE = 0x4000,
582 TRACE_ITER_SYM_USEROBJ = 0x8000,
583 TRACE_ITER_PRINTK_MSGONLY = 0x10000
418}; 584};
419 585
586/*
587 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
588 * control the output of kernel symbols.
589 */
590#define TRACE_ITER_SYM_MASK \
591 (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
592
420extern struct tracer nop_trace; 593extern struct tracer nop_trace;
421 594
595/**
596 * ftrace_preempt_disable - disable preemption scheduler safe
597 *
 598 * When tracing can happen inside the scheduler, there exist
 599 * cases where the tracing might happen before the need_resched
600 * flag is checked. If this happens and the tracer calls
601 * preempt_enable (after a disable), a schedule might take place
602 * causing an infinite recursion.
603 *
 604 * To prevent this, we read the need_resched flag before
605 * disabling preemption. When we want to enable preemption we
606 * check the flag, if it is set, then we call preempt_enable_no_resched.
607 * Otherwise, we call preempt_enable.
608 *
 609 * The rationale for doing the above is that if need_resched is set
610 * and we have yet to reschedule, we are either in an atomic location
611 * (where we do not need to check for scheduling) or we are inside
612 * the scheduler and do not want to resched.
613 */
614static inline int ftrace_preempt_disable(void)
615{
616 int resched;
617
618 resched = need_resched();
619 preempt_disable_notrace();
620
621 return resched;
622}
623
624/**
625 * ftrace_preempt_enable - enable preemption scheduler safe
626 * @resched: the return value from ftrace_preempt_disable
627 *
628 * This is a scheduler safe way to enable preemption and not miss
 629 * any preemption checks. The disable saved the state of preemption.
630 * If resched is set, then we were either inside an atomic or
631 * are inside the scheduler (we would have already scheduled
632 * otherwise). In this case, we do not want to call normal
633 * preempt_enable, but preempt_enable_no_resched instead.
634 */
635static inline void ftrace_preempt_enable(int resched)
636{
637 if (resched)
638 preempt_enable_no_resched_notrace();
639 else
640 preempt_enable_notrace();
641}
642
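/*
 * Illustration only (not part of this patch): a tracing callback would
 * typically bracket its work with the two helpers above.  The name
 * my_trace_func is invented; the signature matches the ftrace callbacks
 * used elsewhere in this series.
 */
static void my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	int resched;

	/* remember need_resched and disable preemption without tracing */
	resched = ftrace_preempt_disable();

	/* ... record the event into the ring buffer ... */

	/* re-enable preemption, but never reschedule from inside here */
	ftrace_preempt_enable(resched);
}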
643#ifdef CONFIG_BRANCH_TRACER
644extern int enable_branch_tracing(struct trace_array *tr);
645extern void disable_branch_tracing(void);
646static inline int trace_branch_enable(struct trace_array *tr)
647{
648 if (trace_flags & TRACE_ITER_BRANCH)
649 return enable_branch_tracing(tr);
650 return 0;
651}
652static inline void trace_branch_disable(void)
653{
654 /* due to races, always disable */
655 disable_branch_tracing();
656}
657#else
658static inline int trace_branch_enable(struct trace_array *tr)
659{
660 return 0;
661}
662static inline void trace_branch_disable(void)
663{
664}
665#endif /* CONFIG_BRANCH_TRACER */
666
422#endif /* _LINUX_KERNEL_TRACE_H */ 667#endif /* _LINUX_KERNEL_TRACE_H */
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index d0a5e50eeff2..3ccebde28482 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -13,101 +13,161 @@
13#include "trace.h" 13#include "trace.h"
14 14
15static struct trace_array *boot_trace; 15static struct trace_array *boot_trace;
16static int trace_boot_enabled; 16static bool pre_initcalls_finished;
17 17
18 18/* Tells the boot tracer that the pre_smp_initcalls are finished.
 19/* Should be started after do_pre_smp_initcalls() in init/main.c */ 19 * So we are ready.
 20 * It doesn't enable sched events tracing, however.
21 * You have to call enable_boot_trace to do so.
22 */
20void start_boot_trace(void) 23void start_boot_trace(void)
21{ 24{
22 trace_boot_enabled = 1; 25 pre_initcalls_finished = true;
23} 26}
24 27
25void stop_boot_trace(void) 28void enable_boot_trace(void)
26{ 29{
27 trace_boot_enabled = 0; 30 if (pre_initcalls_finished)
31 tracing_start_sched_switch_record();
28} 32}
29 33
30void reset_boot_trace(struct trace_array *tr) 34void disable_boot_trace(void)
31{ 35{
32 stop_boot_trace(); 36 if (pre_initcalls_finished)
37 tracing_stop_sched_switch_record();
33} 38}
34 39
35static void boot_trace_init(struct trace_array *tr) 40static int boot_trace_init(struct trace_array *tr)
36{ 41{
37 int cpu; 42 int cpu;
38 boot_trace = tr; 43 boot_trace = tr;
39 44
40 trace_boot_enabled = 0;
41
42 for_each_cpu_mask(cpu, cpu_possible_map) 45 for_each_cpu_mask(cpu, cpu_possible_map)
43 tracing_reset(tr, cpu); 46 tracing_reset(tr, cpu);
47
48 tracing_sched_switch_assign_trace(tr);
49 return 0;
44} 50}
45 51
46static void boot_trace_ctrl_update(struct trace_array *tr) 52static enum print_line_t
53initcall_call_print_line(struct trace_iterator *iter)
47{ 54{
48 if (tr->ctrl) 55 struct trace_entry *entry = iter->ent;
49 start_boot_trace(); 56 struct trace_seq *s = &iter->seq;
57 struct trace_boot_call *field;
58 struct boot_trace_call *call;
59 u64 ts;
60 unsigned long nsec_rem;
61 int ret;
62
63 trace_assign_type(field, entry);
64 call = &field->boot_call;
65 ts = iter->ts;
66 nsec_rem = do_div(ts, 1000000000);
67
68 ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n",
69 (unsigned long)ts, nsec_rem, call->func, call->caller);
70
71 if (!ret)
72 return TRACE_TYPE_PARTIAL_LINE;
50 else 73 else
51 stop_boot_trace(); 74 return TRACE_TYPE_HANDLED;
52} 75}
53 76
54static enum print_line_t initcall_print_line(struct trace_iterator *iter) 77static enum print_line_t
78initcall_ret_print_line(struct trace_iterator *iter)
55{ 79{
56 int ret;
57 struct trace_entry *entry = iter->ent; 80 struct trace_entry *entry = iter->ent;
58 struct trace_boot *field = (struct trace_boot *)entry;
59 struct boot_trace *it = &field->initcall;
60 struct trace_seq *s = &iter->seq; 81 struct trace_seq *s = &iter->seq;
61 struct timespec calltime = ktime_to_timespec(it->calltime); 82 struct trace_boot_ret *field;
62 struct timespec rettime = ktime_to_timespec(it->rettime); 83 struct boot_trace_ret *init_ret;
63 84 u64 ts;
64 if (entry->type == TRACE_BOOT) { 85 unsigned long nsec_rem;
65 ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n", 86 int ret;
66 calltime.tv_sec, 87
67 calltime.tv_nsec, 88 trace_assign_type(field, entry);
68 it->func, it->caller); 89 init_ret = &field->boot_ret;
69 if (!ret) 90 ts = iter->ts;
70 return TRACE_TYPE_PARTIAL_LINE; 91 nsec_rem = do_div(ts, 1000000000);
71 92
72 ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s " 93 ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s "
73 "returned %d after %lld msecs\n", 94 "returned %d after %llu msecs\n",
74 rettime.tv_sec, 95 (unsigned long) ts,
75 rettime.tv_nsec, 96 nsec_rem,
76 it->func, it->result, it->duration); 97 init_ret->func, init_ret->result, init_ret->duration);
77 98
78 if (!ret) 99 if (!ret)
79 return TRACE_TYPE_PARTIAL_LINE; 100 return TRACE_TYPE_PARTIAL_LINE;
101 else
80 return TRACE_TYPE_HANDLED; 102 return TRACE_TYPE_HANDLED;
103}
104
105static enum print_line_t initcall_print_line(struct trace_iterator *iter)
106{
107 struct trace_entry *entry = iter->ent;
108
109 switch (entry->type) {
110 case TRACE_BOOT_CALL:
111 return initcall_call_print_line(iter);
112 case TRACE_BOOT_RET:
113 return initcall_ret_print_line(iter);
114 default:
115 return TRACE_TYPE_UNHANDLED;
81 } 116 }
82 return TRACE_TYPE_UNHANDLED;
83} 117}
84 118
85struct tracer boot_tracer __read_mostly = 119struct tracer boot_tracer __read_mostly =
86{ 120{
87 .name = "initcall", 121 .name = "initcall",
88 .init = boot_trace_init, 122 .init = boot_trace_init,
89 .reset = reset_boot_trace, 123 .reset = tracing_reset_online_cpus,
90 .ctrl_update = boot_trace_ctrl_update,
91 .print_line = initcall_print_line, 124 .print_line = initcall_print_line,
92}; 125};
93 126
94void trace_boot(struct boot_trace *it, initcall_t fn) 127void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
95{ 128{
96 struct ring_buffer_event *event; 129 struct ring_buffer_event *event;
97 struct trace_boot *entry; 130 struct trace_boot_call *entry;
98 struct trace_array_cpu *data;
99 unsigned long irq_flags; 131 unsigned long irq_flags;
100 struct trace_array *tr = boot_trace; 132 struct trace_array *tr = boot_trace;
101 133
102 if (!trace_boot_enabled) 134 if (!pre_initcalls_finished)
103 return; 135 return;
104 136
105 /* Get its name now since this function could 137 /* Get its name now since this function could
106 * disappear because it is in the .init section. 138 * disappear because it is in the .init section.
107 */ 139 */
108 sprint_symbol(it->func, (unsigned long)fn); 140 sprint_symbol(bt->func, (unsigned long)fn);
141 preempt_disable();
142
143 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
144 &irq_flags);
145 if (!event)
146 goto out;
147 entry = ring_buffer_event_data(event);
148 tracing_generic_entry_update(&entry->ent, 0, 0);
149 entry->ent.type = TRACE_BOOT_CALL;
150 entry->boot_call = *bt;
151 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
152
153 trace_wake_up();
154
155 out:
156 preempt_enable();
157}
158
159void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
160{
161 struct ring_buffer_event *event;
162 struct trace_boot_ret *entry;
163 unsigned long irq_flags;
164 struct trace_array *tr = boot_trace;
165
166 if (!pre_initcalls_finished)
167 return;
168
169 sprint_symbol(bt->func, (unsigned long)fn);
109 preempt_disable(); 170 preempt_disable();
110 data = tr->data[smp_processor_id()];
111 171
112 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), 172 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
113 &irq_flags); 173 &irq_flags);
@@ -115,8 +175,8 @@ void trace_boot(struct boot_trace *it, initcall_t fn)
115 goto out; 175 goto out;
116 entry = ring_buffer_event_data(event); 176 entry = ring_buffer_event_data(event);
117 tracing_generic_entry_update(&entry->ent, 0, 0); 177 tracing_generic_entry_update(&entry->ent, 0, 0);
118 entry->ent.type = TRACE_BOOT; 178 entry->ent.type = TRACE_BOOT_RET;
119 entry->initcall = *it; 179 entry->boot_ret = *bt;
120 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 180 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
121 181
122 trace_wake_up(); 182 trace_wake_up();
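For orientation only: with the two format strings above, the boot tracer emits lines of roughly this shape (the timestamps, initcall name and duration below are invented):

[    2.503920000] calling foo_init @ 1
[    2.504112000] initcall foo_init returned 0 after 0 msecs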
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
new file mode 100644
index 000000000000..6c00feb3bac7
--- /dev/null
+++ b/kernel/trace/trace_branch.c
@@ -0,0 +1,342 @@
1/*
2 * unlikely profiler
3 *
4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5 */
6#include <linux/kallsyms.h>
7#include <linux/seq_file.h>
8#include <linux/spinlock.h>
9#include <linux/irqflags.h>
10#include <linux/debugfs.h>
11#include <linux/uaccess.h>
12#include <linux/module.h>
13#include <linux/ftrace.h>
14#include <linux/hash.h>
15#include <linux/fs.h>
16#include <asm/local.h>
17#include "trace.h"
18
19#ifdef CONFIG_BRANCH_TRACER
20
21static int branch_tracing_enabled __read_mostly;
22static DEFINE_MUTEX(branch_tracing_mutex);
23static struct trace_array *branch_tracer;
24
25static void
26probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
27{
28 struct trace_array *tr = branch_tracer;
29 struct ring_buffer_event *event;
30 struct trace_branch *entry;
31 unsigned long flags, irq_flags;
32 int cpu, pc;
33 const char *p;
34
35 /*
36 * I would love to save just the ftrace_likely_data pointer, but
37 * this code can also be used by modules. Ugly things can happen
38 * if the module is unloaded, and then we go and read the
39 * pointer. This is slower, but much safer.
40 */
41
42 if (unlikely(!tr))
43 return;
44
45 local_irq_save(flags);
46 cpu = raw_smp_processor_id();
47 if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
48 goto out;
49
50 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
51 &irq_flags);
52 if (!event)
53 goto out;
54
55 pc = preempt_count();
56 entry = ring_buffer_event_data(event);
57 tracing_generic_entry_update(&entry->ent, flags, pc);
58 entry->ent.type = TRACE_BRANCH;
59
60 /* Strip off the path, only save the file */
61 p = f->file + strlen(f->file);
62 while (p >= f->file && *p != '/')
63 p--;
64 p++;
65
66 strncpy(entry->func, f->func, TRACE_FUNC_SIZE);
67 strncpy(entry->file, p, TRACE_FILE_SIZE);
68 entry->func[TRACE_FUNC_SIZE] = 0;
69 entry->file[TRACE_FILE_SIZE] = 0;
70 entry->line = f->line;
71 entry->correct = val == expect;
72
73 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
74
75 out:
76 atomic_dec(&tr->data[cpu]->disabled);
77 local_irq_restore(flags);
78}
79
80static inline
81void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
82{
83 if (!branch_tracing_enabled)
84 return;
85
86 probe_likely_condition(f, val, expect);
87}
88
89int enable_branch_tracing(struct trace_array *tr)
90{
91 int ret = 0;
92
93 mutex_lock(&branch_tracing_mutex);
94 branch_tracer = tr;
95 /*
96 * Must be seen before enabling. The reader is a condition
97 * where we do not need a matching rmb()
98 */
99 smp_wmb();
100 branch_tracing_enabled++;
101 mutex_unlock(&branch_tracing_mutex);
102
103 return ret;
104}
105
106void disable_branch_tracing(void)
107{
108 mutex_lock(&branch_tracing_mutex);
109
110 if (!branch_tracing_enabled)
111 goto out_unlock;
112
113 branch_tracing_enabled--;
114
115 out_unlock:
116 mutex_unlock(&branch_tracing_mutex);
117}
118
119static void start_branch_trace(struct trace_array *tr)
120{
121 enable_branch_tracing(tr);
122}
123
124static void stop_branch_trace(struct trace_array *tr)
125{
126 disable_branch_tracing();
127}
128
129static int branch_trace_init(struct trace_array *tr)
130{
131 int cpu;
132
133 for_each_online_cpu(cpu)
134 tracing_reset(tr, cpu);
135
136 start_branch_trace(tr);
137 return 0;
138}
139
140static void branch_trace_reset(struct trace_array *tr)
141{
142 stop_branch_trace(tr);
143}
144
145struct tracer branch_trace __read_mostly =
146{
147 .name = "branch",
148 .init = branch_trace_init,
149 .reset = branch_trace_reset,
150#ifdef CONFIG_FTRACE_SELFTEST
151 .selftest = trace_selftest_startup_branch,
152#endif
153};
154
155__init static int init_branch_trace(void)
156{
157 return register_tracer(&branch_trace);
158}
159
160device_initcall(init_branch_trace);
161#else
162static inline
163void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
164{
165}
166#endif /* CONFIG_BRANCH_TRACER */
167
168void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
169{
170 /*
171 * I would love to have a trace point here instead, but the
172 * trace point code is so inundated with unlikely and likely
173 * conditions that the recursive nightmare that exists is too
174 * much to try to get working. At least for now.
175 */
176 trace_likely_condition(f, val, expect);
177
178 /* FIXME: Make this atomic! */
179 if (val == expect)
180 f->correct++;
181 else
182 f->incorrect++;
183}
184EXPORT_SYMBOL(ftrace_likely_update);
185
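/*
 * Sketch only, not part of this patch: the compiler.h side that feeds
 * ftrace_likely_update() is not shown in this diff.  Conceptually, a
 * profiled annotation carries a static ftrace_branch_data record and
 * reports each outcome roughly as below.  The naming is invented; the
 * real macro also places the record in the section bounded by the
 * __start/__stop_annotated_branch_profile symbols referenced further down.
 */
#define my_profiled_likely(cond) ({					\
	static struct ftrace_branch_data __branch_data = {		\
		.func	= __func__,					\
		.file	= __FILE__,					\
		.line	= __LINE__,					\
	};								\
	int __r = !!(cond);						\
	ftrace_likely_update(&__branch_data, __r, 1 /* expect true */);\
	__r;								\
})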
186struct ftrace_pointer {
187 void *start;
188 void *stop;
189 int hit;
190};
191
192static void *
193t_next(struct seq_file *m, void *v, loff_t *pos)
194{
195 const struct ftrace_pointer *f = m->private;
196 struct ftrace_branch_data *p = v;
197
198 (*pos)++;
199
200 if (v == (void *)1)
201 return f->start;
202
203 ++p;
204
205 if ((void *)p >= (void *)f->stop)
206 return NULL;
207
208 return p;
209}
210
211static void *t_start(struct seq_file *m, loff_t *pos)
212{
213 void *t = (void *)1;
214 loff_t l = 0;
215
216 for (; t && l < *pos; t = t_next(m, t, &l))
217 ;
218
219 return t;
220}
221
222static void t_stop(struct seq_file *m, void *p)
223{
224}
225
226static int t_show(struct seq_file *m, void *v)
227{
228 const struct ftrace_pointer *fp = m->private;
229 struct ftrace_branch_data *p = v;
230 const char *f;
231 long percent;
232
233 if (v == (void *)1) {
234 if (fp->hit)
235 seq_printf(m, " miss hit %% ");
236 else
237 seq_printf(m, " correct incorrect %% ");
238 seq_printf(m, " Function "
239 " File Line\n"
240 " ------- --------- - "
241 " -------- "
242 " ---- ----\n");
243 return 0;
244 }
245
246 /* Only print the file, not the path */
247 f = p->file + strlen(p->file);
248 while (f >= p->file && *f != '/')
249 f--;
250 f++;
251
252 /*
 253 * The miss is overlaid on correct, and hit on incorrect.
254 */
255 if (p->correct) {
256 percent = p->incorrect * 100;
257 percent /= p->correct + p->incorrect;
258 } else
259 percent = p->incorrect ? 100 : -1;
260
261 seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect);
262 if (percent < 0)
263 seq_printf(m, " X ");
264 else
265 seq_printf(m, "%3ld ", percent);
266 seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
267 return 0;
268}
269
270static struct seq_operations tracing_likely_seq_ops = {
271 .start = t_start,
272 .next = t_next,
273 .stop = t_stop,
274 .show = t_show,
275};
276
277static int tracing_branch_open(struct inode *inode, struct file *file)
278{
279 int ret;
280
281 ret = seq_open(file, &tracing_likely_seq_ops);
282 if (!ret) {
283 struct seq_file *m = file->private_data;
284 m->private = (void *)inode->i_private;
285 }
286
287 return ret;
288}
289
290static const struct file_operations tracing_branch_fops = {
291 .open = tracing_branch_open,
292 .read = seq_read,
293 .llseek = seq_lseek,
294};
295
296#ifdef CONFIG_PROFILE_ALL_BRANCHES
297extern unsigned long __start_branch_profile[];
298extern unsigned long __stop_branch_profile[];
299
300static const struct ftrace_pointer ftrace_branch_pos = {
301 .start = __start_branch_profile,
302 .stop = __stop_branch_profile,
303 .hit = 1,
304};
305
306#endif /* CONFIG_PROFILE_ALL_BRANCHES */
307
308extern unsigned long __start_annotated_branch_profile[];
309extern unsigned long __stop_annotated_branch_profile[];
310
311static const struct ftrace_pointer ftrace_annotated_branch_pos = {
312 .start = __start_annotated_branch_profile,
313 .stop = __stop_annotated_branch_profile,
314};
315
316static __init int ftrace_branch_init(void)
317{
318 struct dentry *d_tracer;
319 struct dentry *entry;
320
321 d_tracer = tracing_init_dentry();
322
323 entry = debugfs_create_file("profile_annotated_branch", 0444, d_tracer,
324 (void *)&ftrace_annotated_branch_pos,
325 &tracing_branch_fops);
326 if (!entry)
327 pr_warning("Could not create debugfs "
328 "'profile_annotatet_branch' entry\n");
329
330#ifdef CONFIG_PROFILE_ALL_BRANCHES
331 entry = debugfs_create_file("profile_branch", 0444, d_tracer,
332 (void *)&ftrace_branch_pos,
333 &tracing_branch_fops);
334 if (!entry)
335 pr_warning("Could not create debugfs"
336 " 'profile_branch' entry\n");
337#endif
338
339 return 0;
340}
341
342device_initcall(ftrace_branch_init);
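For orientation only: with the seq_printf calls in t_show() above, the profile_annotated_branch file reads roughly as follows (the counts, function and file names below are invented, and column widths are approximate):

 correct incorrect  %        Function                  File              Line
 ------- ---------  -        --------                  ----              ----
    2054        15   0       pick_next_task_fair       sched_fair.c      1436
       0       328 100       calc_delta_fair           sched_fair.c       406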
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 0f85a64003d3..9236d7e25a16 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -16,20 +16,10 @@
16 16
17#include "trace.h" 17#include "trace.h"
18 18
19static void function_reset(struct trace_array *tr)
20{
21 int cpu;
22
23 tr->time_start = ftrace_now(tr->cpu);
24
25 for_each_online_cpu(cpu)
26 tracing_reset(tr, cpu);
27}
28
29static void start_function_trace(struct trace_array *tr) 19static void start_function_trace(struct trace_array *tr)
30{ 20{
31 tr->cpu = get_cpu(); 21 tr->cpu = get_cpu();
32 function_reset(tr); 22 tracing_reset_online_cpus(tr);
33 put_cpu(); 23 put_cpu();
34 24
35 tracing_start_cmdline_record(); 25 tracing_start_cmdline_record();
@@ -42,24 +32,20 @@ static void stop_function_trace(struct trace_array *tr)
42 tracing_stop_cmdline_record(); 32 tracing_stop_cmdline_record();
43} 33}
44 34
45static void function_trace_init(struct trace_array *tr) 35static int function_trace_init(struct trace_array *tr)
46{ 36{
47 if (tr->ctrl) 37 start_function_trace(tr);
48 start_function_trace(tr); 38 return 0;
49} 39}
50 40
51static void function_trace_reset(struct trace_array *tr) 41static void function_trace_reset(struct trace_array *tr)
52{ 42{
53 if (tr->ctrl) 43 stop_function_trace(tr);
54 stop_function_trace(tr);
55} 44}
56 45
57static void function_trace_ctrl_update(struct trace_array *tr) 46static void function_trace_start(struct trace_array *tr)
58{ 47{
59 if (tr->ctrl) 48 tracing_reset_online_cpus(tr);
60 start_function_trace(tr);
61 else
62 stop_function_trace(tr);
63} 49}
64 50
65static struct tracer function_trace __read_mostly = 51static struct tracer function_trace __read_mostly =
@@ -67,7 +53,7 @@ static struct tracer function_trace __read_mostly =
67 .name = "function", 53 .name = "function",
68 .init = function_trace_init, 54 .init = function_trace_init,
69 .reset = function_trace_reset, 55 .reset = function_trace_reset,
70 .ctrl_update = function_trace_ctrl_update, 56 .start = function_trace_start,
71#ifdef CONFIG_FTRACE_SELFTEST 57#ifdef CONFIG_FTRACE_SELFTEST
72 .selftest = trace_selftest_startup_function, 58 .selftest = trace_selftest_startup_function,
73#endif 59#endif
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
new file mode 100644
index 000000000000..4bf39fcae97a
--- /dev/null
+++ b/kernel/trace/trace_functions_graph.c
@@ -0,0 +1,669 @@
1/*
2 *
3 * Function graph tracer.
4 * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
5 * Mostly borrowed from function tracer which
6 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
7 *
8 */
9#include <linux/debugfs.h>
10#include <linux/uaccess.h>
11#include <linux/ftrace.h>
12#include <linux/fs.h>
13
14#include "trace.h"
15
16#define TRACE_GRAPH_INDENT 2
17
18/* Flag options */
19#define TRACE_GRAPH_PRINT_OVERRUN 0x1
20#define TRACE_GRAPH_PRINT_CPU 0x2
21#define TRACE_GRAPH_PRINT_OVERHEAD 0x4
22#define TRACE_GRAPH_PRINT_PROC 0x8
23
24static struct tracer_opt trace_opts[] = {
25 /* Display overruns ? */
26 { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
27 /* Display CPU ? */
28 { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
29 /* Display Overhead ? */
30 { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
31 /* Display proc name/pid */
32 { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
33 { } /* Empty entry */
34};
35
36static struct tracer_flags tracer_flags = {
37 /* Don't display overruns and proc by default */
38 .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD,
39 .opts = trace_opts
40};
41
42/* pid on the last trace processed */
43static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
44
45static int graph_trace_init(struct trace_array *tr)
46{
47 int cpu, ret;
48
49 for_each_online_cpu(cpu)
50 tracing_reset(tr, cpu);
51
52 ret = register_ftrace_graph(&trace_graph_return,
53 &trace_graph_entry);
54 if (ret)
55 return ret;
56 tracing_start_cmdline_record();
57
58 return 0;
59}
60
61static void graph_trace_reset(struct trace_array *tr)
62{
63 tracing_stop_cmdline_record();
64 unregister_ftrace_graph();
65}
66
67static inline int log10_cpu(int nb)
68{
69 if (nb / 100)
70 return 3;
71 if (nb / 10)
72 return 2;
73 return 1;
74}
75
76static enum print_line_t
77print_graph_cpu(struct trace_seq *s, int cpu)
78{
79 int i;
80 int ret;
81 int log10_this = log10_cpu(cpu);
82 int log10_all = log10_cpu(cpus_weight_nr(cpu_online_map));
83
84
85 /*
86 * Start with a space character - to make it stand out
87 * to the right a bit when trace output is pasted into
88 * email:
89 */
90 ret = trace_seq_printf(s, " ");
91
92 /*
93 * Tricky - we space the CPU field according to the max
94 * number of online CPUs. On a 2-cpu system it would take
95 * a maximum of 1 digit - on a 128 cpu system it would
96 * take up to 3 digits:
97 */
98 for (i = 0; i < log10_all - log10_this; i++) {
99 ret = trace_seq_printf(s, " ");
100 if (!ret)
101 return TRACE_TYPE_PARTIAL_LINE;
102 }
103 ret = trace_seq_printf(s, "%d) ", cpu);
104 if (!ret)
105 return TRACE_TYPE_PARTIAL_LINE;
106
107 return TRACE_TYPE_HANDLED;
108}
109
110#define TRACE_GRAPH_PROCINFO_LENGTH 14
111
112static enum print_line_t
113print_graph_proc(struct trace_seq *s, pid_t pid)
114{
115 int i;
116 int ret;
117 int len;
118 char comm[8];
119 int spaces = 0;
120 /* sign + log10(MAX_INT) + '\0' */
121 char pid_str[11];
122
123 strncpy(comm, trace_find_cmdline(pid), 7);
124 comm[7] = '\0';
125 sprintf(pid_str, "%d", pid);
126
127 /* 1 stands for the "-" character */
128 len = strlen(comm) + strlen(pid_str) + 1;
129
130 if (len < TRACE_GRAPH_PROCINFO_LENGTH)
131 spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
132
133 /* First spaces to align center */
134 for (i = 0; i < spaces / 2; i++) {
135 ret = trace_seq_printf(s, " ");
136 if (!ret)
137 return TRACE_TYPE_PARTIAL_LINE;
138 }
139
140 ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
141 if (!ret)
142 return TRACE_TYPE_PARTIAL_LINE;
143
144 /* Last spaces to align center */
145 for (i = 0; i < spaces - (spaces / 2); i++) {
146 ret = trace_seq_printf(s, " ");
147 if (!ret)
148 return TRACE_TYPE_PARTIAL_LINE;
149 }
150 return TRACE_TYPE_HANDLED;
151}
152
153
154/* If the pid changed since the last trace, output this event */
155static enum print_line_t
156verif_pid(struct trace_seq *s, pid_t pid, int cpu)
157{
158 pid_t prev_pid;
159 int ret;
160
161 if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
162 return TRACE_TYPE_HANDLED;
163
164 prev_pid = last_pid[cpu];
165 last_pid[cpu] = pid;
166
167/*
168 * Context-switch trace line:
169
170 ------------------------------------------
171 | 1) migration/0--1 => sshd-1755
172 ------------------------------------------
173
174 */
175 ret = trace_seq_printf(s,
176 " ------------------------------------------\n");
177 if (!ret)
 178 return TRACE_TYPE_PARTIAL_LINE;
179
180 ret = print_graph_cpu(s, cpu);
181 if (ret == TRACE_TYPE_PARTIAL_LINE)
 182 return TRACE_TYPE_PARTIAL_LINE;
183
184 ret = print_graph_proc(s, prev_pid);
185 if (ret == TRACE_TYPE_PARTIAL_LINE)
 186 return TRACE_TYPE_PARTIAL_LINE;
187
188 ret = trace_seq_printf(s, " => ");
189 if (!ret)
 190 return TRACE_TYPE_PARTIAL_LINE;
191
192 ret = print_graph_proc(s, pid);
193 if (ret == TRACE_TYPE_PARTIAL_LINE)
 194 return TRACE_TYPE_PARTIAL_LINE;
195
196 ret = trace_seq_printf(s,
197 "\n ------------------------------------------\n\n");
198 if (!ret)
 199 return TRACE_TYPE_PARTIAL_LINE;
200
 201 return TRACE_TYPE_HANDLED;
202}
203
204static bool
205trace_branch_is_leaf(struct trace_iterator *iter,
206 struct ftrace_graph_ent_entry *curr)
207{
208 struct ring_buffer_iter *ring_iter;
209 struct ring_buffer_event *event;
210 struct ftrace_graph_ret_entry *next;
211
212 ring_iter = iter->buffer_iter[iter->cpu];
213
214 if (!ring_iter)
215 return false;
216
217 event = ring_buffer_iter_peek(ring_iter, NULL);
218
219 if (!event)
220 return false;
221
222 next = ring_buffer_event_data(event);
223
224 if (next->ent.type != TRACE_GRAPH_RET)
225 return false;
226
227 if (curr->ent.pid != next->ent.pid ||
228 curr->graph_ent.func != next->ret.func)
229 return false;
230
231 return true;
232}
233
234static enum print_line_t
235print_graph_irq(struct trace_seq *s, unsigned long addr,
236 enum trace_type type, int cpu, pid_t pid)
237{
238 int ret;
239
240 if (addr < (unsigned long)__irqentry_text_start ||
241 addr >= (unsigned long)__irqentry_text_end)
242 return TRACE_TYPE_UNHANDLED;
243
244 if (type == TRACE_GRAPH_ENT) {
245 ret = trace_seq_printf(s, "==========> | ");
246 } else {
247 /* Cpu */
248 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
249 ret = print_graph_cpu(s, cpu);
250 if (ret == TRACE_TYPE_PARTIAL_LINE)
251 return TRACE_TYPE_PARTIAL_LINE;
252 }
253 /* Proc */
254 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
255 ret = print_graph_proc(s, pid);
256 if (ret == TRACE_TYPE_PARTIAL_LINE)
257 return TRACE_TYPE_PARTIAL_LINE;
258
259 ret = trace_seq_printf(s, " | ");
260 if (!ret)
261 return TRACE_TYPE_PARTIAL_LINE;
262 }
263
264 /* No overhead */
265 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
266 ret = trace_seq_printf(s, " ");
267 if (!ret)
268 return TRACE_TYPE_PARTIAL_LINE;
269 }
270
271 ret = trace_seq_printf(s, "<========== |\n");
272 }
273 if (!ret)
274 return TRACE_TYPE_PARTIAL_LINE;
275 return TRACE_TYPE_HANDLED;
276}
277
278static enum print_line_t
279print_graph_duration(unsigned long long duration, struct trace_seq *s)
280{
281 unsigned long nsecs_rem = do_div(duration, 1000);
282 /* log10(ULONG_MAX) + '\0' */
283 char msecs_str[21];
284 char nsecs_str[5];
285 int ret, len;
286 int i;
287
288 sprintf(msecs_str, "%lu", (unsigned long) duration);
289
290 /* Print msecs */
291 ret = trace_seq_printf(s, msecs_str);
292 if (!ret)
293 return TRACE_TYPE_PARTIAL_LINE;
294
295 len = strlen(msecs_str);
296
 297 /* Print nsecs (we don't want to exceed 7 digits) */
298 if (len < 7) {
299 snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
300 ret = trace_seq_printf(s, ".%s", nsecs_str);
301 if (!ret)
302 return TRACE_TYPE_PARTIAL_LINE;
303 len += strlen(nsecs_str);
304 }
305
306 ret = trace_seq_printf(s, " us ");
307 if (!ret)
308 return TRACE_TYPE_PARTIAL_LINE;
309
310 /* Print remaining spaces to fit the row's width */
311 for (i = len; i < 7; i++) {
312 ret = trace_seq_printf(s, " ");
313 if (!ret)
314 return TRACE_TYPE_PARTIAL_LINE;
315 }
316
317 ret = trace_seq_printf(s, "| ");
318 if (!ret)
319 return TRACE_TYPE_PARTIAL_LINE;
320 return TRACE_TYPE_HANDLED;
321
322}
323
 324/* Signal an overhead of the execution time to the output */
325static int
326print_graph_overhead(unsigned long long duration, struct trace_seq *s)
327{
328 /* Duration exceeded 100 msecs */
329 if (duration > 100000ULL)
330 return trace_seq_printf(s, "! ");
331
332 /* Duration exceeded 10 msecs */
333 if (duration > 10000ULL)
334 return trace_seq_printf(s, "+ ");
335
336 return trace_seq_printf(s, " ");
337}
338
339/* Case of a leaf function on its call entry */
340static enum print_line_t
341print_graph_entry_leaf(struct trace_iterator *iter,
342 struct ftrace_graph_ent_entry *entry, struct trace_seq *s)
343{
344 struct ftrace_graph_ret_entry *ret_entry;
345 struct ftrace_graph_ret *graph_ret;
346 struct ring_buffer_event *event;
347 struct ftrace_graph_ent *call;
348 unsigned long long duration;
349 int ret;
350 int i;
351
352 event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
353 ret_entry = ring_buffer_event_data(event);
354 graph_ret = &ret_entry->ret;
355 call = &entry->graph_ent;
356 duration = graph_ret->rettime - graph_ret->calltime;
357
358 /* Overhead */
359 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
360 ret = print_graph_overhead(duration, s);
361 if (!ret)
362 return TRACE_TYPE_PARTIAL_LINE;
363 }
364
365 /* Duration */
366 ret = print_graph_duration(duration, s);
367 if (ret == TRACE_TYPE_PARTIAL_LINE)
368 return TRACE_TYPE_PARTIAL_LINE;
369
370 /* Function */
371 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
372 ret = trace_seq_printf(s, " ");
373 if (!ret)
374 return TRACE_TYPE_PARTIAL_LINE;
375 }
376
377 ret = seq_print_ip_sym(s, call->func, 0);
378 if (!ret)
379 return TRACE_TYPE_PARTIAL_LINE;
380
381 ret = trace_seq_printf(s, "();\n");
382 if (!ret)
383 return TRACE_TYPE_PARTIAL_LINE;
384
385 return TRACE_TYPE_HANDLED;
386}
387
388static enum print_line_t
389print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
390 struct trace_seq *s, pid_t pid, int cpu)
391{
392 int i;
393 int ret;
394 struct ftrace_graph_ent *call = &entry->graph_ent;
395
396 /* No overhead */
397 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
398 ret = trace_seq_printf(s, " ");
399 if (!ret)
400 return TRACE_TYPE_PARTIAL_LINE;
401 }
402
403 /* Interrupt */
404 ret = print_graph_irq(s, call->func, TRACE_GRAPH_ENT, cpu, pid);
405 if (ret == TRACE_TYPE_UNHANDLED) {
406 /* No time */
407 ret = trace_seq_printf(s, " | ");
408 if (!ret)
409 return TRACE_TYPE_PARTIAL_LINE;
410 } else {
411 if (ret == TRACE_TYPE_PARTIAL_LINE)
412 return TRACE_TYPE_PARTIAL_LINE;
413 }
414
415
416 /* Function */
417 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
418 ret = trace_seq_printf(s, " ");
419 if (!ret)
420 return TRACE_TYPE_PARTIAL_LINE;
421 }
422
423 ret = seq_print_ip_sym(s, call->func, 0);
424 if (!ret)
425 return TRACE_TYPE_PARTIAL_LINE;
426
427 ret = trace_seq_printf(s, "() {\n");
428 if (!ret)
429 return TRACE_TYPE_PARTIAL_LINE;
430
431 return TRACE_TYPE_HANDLED;
432}
433
434static enum print_line_t
435print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
436 struct trace_iterator *iter, int cpu)
437{
438 int ret;
439 struct trace_entry *ent = iter->ent;
440
441 /* Pid */
442 if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE)
443 return TRACE_TYPE_PARTIAL_LINE;
444
445 /* Cpu */
446 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
447 ret = print_graph_cpu(s, cpu);
448 if (ret == TRACE_TYPE_PARTIAL_LINE)
449 return TRACE_TYPE_PARTIAL_LINE;
450 }
451
452 /* Proc */
453 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
454 ret = print_graph_proc(s, ent->pid);
455 if (ret == TRACE_TYPE_PARTIAL_LINE)
456 return TRACE_TYPE_PARTIAL_LINE;
457
458 ret = trace_seq_printf(s, " | ");
459 if (!ret)
460 return TRACE_TYPE_PARTIAL_LINE;
461 }
462
463 if (trace_branch_is_leaf(iter, field))
464 return print_graph_entry_leaf(iter, field, s);
465 else
466 return print_graph_entry_nested(field, s, iter->ent->pid, cpu);
467
468}
469
470static enum print_line_t
471print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
472 struct trace_entry *ent, int cpu)
473{
474 int i;
475 int ret;
476 unsigned long long duration = trace->rettime - trace->calltime;
477
478 /* Pid */
479 if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE)
480 return TRACE_TYPE_PARTIAL_LINE;
481
482 /* Cpu */
483 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
484 ret = print_graph_cpu(s, cpu);
485 if (ret == TRACE_TYPE_PARTIAL_LINE)
486 return TRACE_TYPE_PARTIAL_LINE;
487 }
488
489 /* Proc */
490 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
491 ret = print_graph_proc(s, ent->pid);
492 if (ret == TRACE_TYPE_PARTIAL_LINE)
493 return TRACE_TYPE_PARTIAL_LINE;
494
495 ret = trace_seq_printf(s, " | ");
496 if (!ret)
497 return TRACE_TYPE_PARTIAL_LINE;
498 }
499
500 /* Overhead */
501 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
502 ret = print_graph_overhead(duration, s);
503 if (!ret)
504 return TRACE_TYPE_PARTIAL_LINE;
505 }
506
507 /* Duration */
508 ret = print_graph_duration(duration, s);
509 if (ret == TRACE_TYPE_PARTIAL_LINE)
510 return TRACE_TYPE_PARTIAL_LINE;
511
512 /* Closing brace */
513 for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
514 ret = trace_seq_printf(s, " ");
515 if (!ret)
516 return TRACE_TYPE_PARTIAL_LINE;
517 }
518
519 ret = trace_seq_printf(s, "}\n");
520 if (!ret)
521 return TRACE_TYPE_PARTIAL_LINE;
522
523 /* Overrun */
524 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
525 ret = trace_seq_printf(s, " (Overruns: %lu)\n",
526 trace->overrun);
527 if (!ret)
528 return TRACE_TYPE_PARTIAL_LINE;
529 }
530
531 ret = print_graph_irq(s, trace->func, TRACE_GRAPH_RET, cpu, ent->pid);
532 if (ret == TRACE_TYPE_PARTIAL_LINE)
533 return TRACE_TYPE_PARTIAL_LINE;
534
535 return TRACE_TYPE_HANDLED;
536}
537
538static enum print_line_t
539print_graph_comment(struct print_entry *trace, struct trace_seq *s,
540 struct trace_entry *ent, struct trace_iterator *iter)
541{
542 int i;
543 int ret;
544
545 /* Pid */
546 if (verif_pid(s, ent->pid, iter->cpu) == TRACE_TYPE_PARTIAL_LINE)
547 return TRACE_TYPE_PARTIAL_LINE;
548
549 /* Cpu */
550 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
551 ret = print_graph_cpu(s, iter->cpu);
552 if (ret == TRACE_TYPE_PARTIAL_LINE)
553 return TRACE_TYPE_PARTIAL_LINE;
554 }
555
556 /* Proc */
557 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
558 ret = print_graph_proc(s, ent->pid);
559 if (ret == TRACE_TYPE_PARTIAL_LINE)
560 return TRACE_TYPE_PARTIAL_LINE;
561
562 ret = trace_seq_printf(s, " | ");
563 if (!ret)
564 return TRACE_TYPE_PARTIAL_LINE;
565 }
566
567 /* No overhead */
568 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
569 ret = trace_seq_printf(s, " ");
570 if (!ret)
571 return TRACE_TYPE_PARTIAL_LINE;
572 }
573
574 /* No time */
575 ret = trace_seq_printf(s, " | ");
576 if (!ret)
577 return TRACE_TYPE_PARTIAL_LINE;
578
579 /* Indentation */
580 if (trace->depth > 0)
581 for (i = 0; i < (trace->depth + 1) * TRACE_GRAPH_INDENT; i++) {
582 ret = trace_seq_printf(s, " ");
583 if (!ret)
584 return TRACE_TYPE_PARTIAL_LINE;
585 }
586
587 /* The comment */
588 ret = trace_seq_printf(s, "/* %s", trace->buf);
589 if (!ret)
590 return TRACE_TYPE_PARTIAL_LINE;
591
592 if (ent->flags & TRACE_FLAG_CONT)
593 trace_seq_print_cont(s, iter);
594
595 ret = trace_seq_printf(s, " */\n");
596 if (!ret)
597 return TRACE_TYPE_PARTIAL_LINE;
598
599 return TRACE_TYPE_HANDLED;
600}
601
602
603enum print_line_t
604print_graph_function(struct trace_iterator *iter)
605{
606 struct trace_seq *s = &iter->seq;
607 struct trace_entry *entry = iter->ent;
608
609 switch (entry->type) {
610 case TRACE_GRAPH_ENT: {
611 struct ftrace_graph_ent_entry *field;
612 trace_assign_type(field, entry);
613 return print_graph_entry(field, s, iter,
614 iter->cpu);
615 }
616 case TRACE_GRAPH_RET: {
617 struct ftrace_graph_ret_entry *field;
618 trace_assign_type(field, entry);
619 return print_graph_return(&field->ret, s, entry, iter->cpu);
620 }
621 case TRACE_PRINT: {
622 struct print_entry *field;
623 trace_assign_type(field, entry);
624 return print_graph_comment(field, s, entry, iter);
625 }
626 default:
627 return TRACE_TYPE_UNHANDLED;
628 }
629}
630
631static void print_graph_headers(struct seq_file *s)
632{
633 /* 1st line */
634 seq_printf(s, "# ");
635 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
636 seq_printf(s, "CPU ");
637 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
638 seq_printf(s, "TASK/PID ");
639 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD)
640 seq_printf(s, "OVERHEAD/");
641 seq_printf(s, "DURATION FUNCTION CALLS\n");
642
643 /* 2nd line */
644 seq_printf(s, "# ");
645 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
646 seq_printf(s, "| ");
647 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
648 seq_printf(s, "| | ");
649 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
650 seq_printf(s, "| ");
651 seq_printf(s, "| | | | |\n");
652 } else
653 seq_printf(s, " | | | | |\n");
654}
655static struct tracer graph_trace __read_mostly = {
656 .name = "function_graph",
657 .init = graph_trace_init,
658 .reset = graph_trace_reset,
659 .print_line = print_graph_function,
660 .print_header = print_graph_headers,
661 .flags = &tracer_flags,
662};
663
664static __init int init_graph_trace(void)
665{
666 return register_tracer(&graph_trace);
667}
668
669device_initcall(init_graph_trace);
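For orientation only: with the default flags above (CPU and overhead columns on, proc column off), the function graph output takes roughly this shape; the functions, timings and alignment below are approximate and invented:

# CPU  OVERHEAD/DURATION          FUNCTION CALLS
 1)               |  sys_open() {
 1)   0.809 us    |    getname();
 1) + 12.213 us   |    do_sys_open() {
 1)   1.031 us    |      get_unused_fd_flags();
 1)   9.535 us    |      do_filp_open();
 1) + 11.512 us   |    }
 1) + 15.103 us   |  }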
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
new file mode 100644
index 000000000000..b6a3e20a49a9
--- /dev/null
+++ b/kernel/trace/trace_hw_branches.c
@@ -0,0 +1,195 @@
1/*
2 * h/w branch tracer for x86 based on bts
3 *
4 * Copyright (C) 2008 Markus Metzger <markus.t.metzger@gmail.com>
5 *
6 */
7
8#include <linux/module.h>
9#include <linux/fs.h>
10#include <linux/debugfs.h>
11#include <linux/ftrace.h>
12#include <linux/kallsyms.h>
13
14#include <asm/ds.h>
15
16#include "trace.h"
17
18
19#define SIZEOF_BTS (1 << 13)
20
21static DEFINE_PER_CPU(struct bts_tracer *, tracer);
22static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer);
23
24#define this_tracer per_cpu(tracer, smp_processor_id())
25#define this_buffer per_cpu(buffer, smp_processor_id())
26
27
28static void bts_trace_start_cpu(void *arg)
29{
30 if (this_tracer)
31 ds_release_bts(this_tracer);
32
33 this_tracer =
34 ds_request_bts(/* task = */ NULL, this_buffer, SIZEOF_BTS,
35 /* ovfl = */ NULL, /* th = */ (size_t)-1,
36 BTS_KERNEL);
37 if (IS_ERR(this_tracer)) {
38 this_tracer = NULL;
39 return;
40 }
41}
42
43static void bts_trace_start(struct trace_array *tr)
44{
45 int cpu;
46
47 tracing_reset_online_cpus(tr);
48
49 for_each_cpu_mask(cpu, cpu_possible_map)
50 smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
51}
52
53static void bts_trace_stop_cpu(void *arg)
54{
55 if (this_tracer) {
56 ds_release_bts(this_tracer);
57 this_tracer = NULL;
58 }
59}
60
61static void bts_trace_stop(struct trace_array *tr)
62{
63 int cpu;
64
65 for_each_cpu_mask(cpu, cpu_possible_map)
66 smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
67}
68
69static int bts_trace_init(struct trace_array *tr)
70{
71 tracing_reset_online_cpus(tr);
72 bts_trace_start(tr);
73
74 return 0;
75}
76
77static void bts_trace_print_header(struct seq_file *m)
78{
79 seq_puts(m,
80 "# CPU# FROM TO FUNCTION\n");
81 seq_puts(m,
82 "# | | | |\n");
83}
84
85static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
86{
87 struct trace_entry *entry = iter->ent;
88 struct trace_seq *seq = &iter->seq;
89 struct hw_branch_entry *it;
90
91 trace_assign_type(it, entry);
92
93 if (entry->type == TRACE_HW_BRANCHES) {
94 if (trace_seq_printf(seq, "%4d ", entry->cpu) &&
95 trace_seq_printf(seq, "0x%016llx -> 0x%016llx ",
96 it->from, it->to) &&
97 (!it->from ||
98 seq_print_ip_sym(seq, it->from, /* sym_flags = */ 0)) &&
99 trace_seq_printf(seq, "\n"))
100 return TRACE_TYPE_HANDLED;
 101 return TRACE_TYPE_PARTIAL_LINE;
102 }
103 return TRACE_TYPE_UNHANDLED;
104}
105
106void trace_hw_branch(struct trace_array *tr, u64 from, u64 to)
107{
108 struct ring_buffer_event *event;
109 struct hw_branch_entry *entry;
110 unsigned long irq;
111
112 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq);
113 if (!event)
114 return;
115 entry = ring_buffer_event_data(event);
116 tracing_generic_entry_update(&entry->ent, 0, from);
117 entry->ent.type = TRACE_HW_BRANCHES;
118 entry->ent.cpu = smp_processor_id();
119 entry->from = from;
120 entry->to = to;
121 ring_buffer_unlock_commit(tr->buffer, event, irq);
122}
123
124static void trace_bts_at(struct trace_array *tr,
125 const struct bts_trace *trace, void *at)
126{
127 struct bts_struct bts;
128 int err = 0;
129
130 WARN_ON_ONCE(!trace->read);
131 if (!trace->read)
132 return;
133
134 err = trace->read(this_tracer, at, &bts);
135 if (err < 0)
136 return;
137
138 switch (bts.qualifier) {
139 case BTS_BRANCH:
140 trace_hw_branch(tr, bts.variant.lbr.from, bts.variant.lbr.to);
141 break;
142 }
143}
144
145static void trace_bts_cpu(void *arg)
146{
147 struct trace_array *tr = (struct trace_array *) arg;
148 const struct bts_trace *trace;
149 unsigned char *at;
150
151 if (!this_tracer)
152 return;
153
154 ds_suspend_bts(this_tracer);
155 trace = ds_read_bts(this_tracer);
156 if (!trace)
157 goto out;
158
159 for (at = trace->ds.top; (void *)at < trace->ds.end;
160 at += trace->ds.size)
161 trace_bts_at(tr, trace, at);
162
163 for (at = trace->ds.begin; (void *)at < trace->ds.top;
164 at += trace->ds.size)
165 trace_bts_at(tr, trace, at);
166
167out:
168 ds_resume_bts(this_tracer);
169}
170
171static void trace_bts_prepare(struct trace_iterator *iter)
172{
173 int cpu;
174
175 for_each_cpu_mask(cpu, cpu_possible_map)
176 smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1);
177}
178
179struct tracer bts_tracer __read_mostly =
180{
181 .name = "hw-branch-tracer",
182 .init = bts_trace_init,
183 .reset = bts_trace_stop,
184 .print_header = bts_trace_print_header,
185 .print_line = bts_trace_print_line,
186 .start = bts_trace_start,
187 .stop = bts_trace_stop,
188 .open = trace_bts_prepare
189};
190
191__init static int init_bts_trace(void)
192{
193 return register_tracer(&bts_tracer);
194}
195device_initcall(init_bts_trace);
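For orientation only: the header and line printers above yield output of roughly this shape (the addresses and symbols below are invented):

#  CPU#  FROM                  TO                  FUNCTION
#  |     |                     |                   |
    0    0xffffffff8024a9c1 -> 0xffffffff80266f32  schedule+0x1f1/0x5d0
    1    0xffffffff80210f43 -> 0xffffffff80210f60  __switch_to+0x183/0x390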
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 9c74071c10e0..7c2e326bbc8b 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -353,15 +353,28 @@ void trace_preempt_off(unsigned long a0, unsigned long a1)
353} 353}
354#endif /* CONFIG_PREEMPT_TRACER */ 354#endif /* CONFIG_PREEMPT_TRACER */
355 355
356/*
357 * save_tracer_enabled is used to save the state of the tracer_enabled
358 * variable when we disable it when we open a trace output file.
359 */
360static int save_tracer_enabled;
361
356static void start_irqsoff_tracer(struct trace_array *tr) 362static void start_irqsoff_tracer(struct trace_array *tr)
357{ 363{
358 register_ftrace_function(&trace_ops); 364 register_ftrace_function(&trace_ops);
359 tracer_enabled = 1; 365 if (tracing_is_enabled()) {
366 tracer_enabled = 1;
367 save_tracer_enabled = 1;
368 } else {
369 tracer_enabled = 0;
370 save_tracer_enabled = 0;
371 }
360} 372}
361 373
362static void stop_irqsoff_tracer(struct trace_array *tr) 374static void stop_irqsoff_tracer(struct trace_array *tr)
363{ 375{
364 tracer_enabled = 0; 376 tracer_enabled = 0;
377 save_tracer_enabled = 0;
365 unregister_ftrace_function(&trace_ops); 378 unregister_ftrace_function(&trace_ops);
366} 379}
367 380
@@ -370,53 +383,55 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
370 irqsoff_trace = tr; 383 irqsoff_trace = tr;
371 /* make sure that the tracer is visible */ 384 /* make sure that the tracer is visible */
372 smp_wmb(); 385 smp_wmb();
373 386 start_irqsoff_tracer(tr);
374 if (tr->ctrl)
375 start_irqsoff_tracer(tr);
376} 387}
377 388
378static void irqsoff_tracer_reset(struct trace_array *tr) 389static void irqsoff_tracer_reset(struct trace_array *tr)
379{ 390{
380 if (tr->ctrl) 391 stop_irqsoff_tracer(tr);
381 stop_irqsoff_tracer(tr);
382} 392}
383 393
384static void irqsoff_tracer_ctrl_update(struct trace_array *tr) 394static void irqsoff_tracer_start(struct trace_array *tr)
385{ 395{
386 if (tr->ctrl) 396 tracer_enabled = 1;
387 start_irqsoff_tracer(tr); 397 save_tracer_enabled = 1;
388 else 398}
389 stop_irqsoff_tracer(tr); 399
400static void irqsoff_tracer_stop(struct trace_array *tr)
401{
402 tracer_enabled = 0;
403 save_tracer_enabled = 0;
390} 404}
391 405
392static void irqsoff_tracer_open(struct trace_iterator *iter) 406static void irqsoff_tracer_open(struct trace_iterator *iter)
393{ 407{
394 /* stop the trace while dumping */ 408 /* stop the trace while dumping */
395 if (iter->tr->ctrl) 409 tracer_enabled = 0;
396 stop_irqsoff_tracer(iter->tr);
397} 410}
398 411
399static void irqsoff_tracer_close(struct trace_iterator *iter) 412static void irqsoff_tracer_close(struct trace_iterator *iter)
400{ 413{
401 if (iter->tr->ctrl) 414 /* restart tracing */
402 start_irqsoff_tracer(iter->tr); 415 tracer_enabled = save_tracer_enabled;
403} 416}
404 417
405#ifdef CONFIG_IRQSOFF_TRACER 418#ifdef CONFIG_IRQSOFF_TRACER
406static void irqsoff_tracer_init(struct trace_array *tr) 419static int irqsoff_tracer_init(struct trace_array *tr)
407{ 420{
408 trace_type = TRACER_IRQS_OFF; 421 trace_type = TRACER_IRQS_OFF;
409 422
410 __irqsoff_tracer_init(tr); 423 __irqsoff_tracer_init(tr);
424 return 0;
411} 425}
412static struct tracer irqsoff_tracer __read_mostly = 426static struct tracer irqsoff_tracer __read_mostly =
413{ 427{
414 .name = "irqsoff", 428 .name = "irqsoff",
415 .init = irqsoff_tracer_init, 429 .init = irqsoff_tracer_init,
416 .reset = irqsoff_tracer_reset, 430 .reset = irqsoff_tracer_reset,
431 .start = irqsoff_tracer_start,
432 .stop = irqsoff_tracer_stop,
417 .open = irqsoff_tracer_open, 433 .open = irqsoff_tracer_open,
418 .close = irqsoff_tracer_close, 434 .close = irqsoff_tracer_close,
419 .ctrl_update = irqsoff_tracer_ctrl_update,
420 .print_max = 1, 435 .print_max = 1,
421#ifdef CONFIG_FTRACE_SELFTEST 436#ifdef CONFIG_FTRACE_SELFTEST
422 .selftest = trace_selftest_startup_irqsoff, 437 .selftest = trace_selftest_startup_irqsoff,
@@ -428,11 +443,12 @@ static struct tracer irqsoff_tracer __read_mostly =
428#endif 443#endif
429 444
430#ifdef CONFIG_PREEMPT_TRACER 445#ifdef CONFIG_PREEMPT_TRACER
431static void preemptoff_tracer_init(struct trace_array *tr) 446static int preemptoff_tracer_init(struct trace_array *tr)
432{ 447{
433 trace_type = TRACER_PREEMPT_OFF; 448 trace_type = TRACER_PREEMPT_OFF;
434 449
435 __irqsoff_tracer_init(tr); 450 __irqsoff_tracer_init(tr);
451 return 0;
436} 452}
437 453
438static struct tracer preemptoff_tracer __read_mostly = 454static struct tracer preemptoff_tracer __read_mostly =
@@ -440,9 +456,10 @@ static struct tracer preemptoff_tracer __read_mostly =
440 .name = "preemptoff", 456 .name = "preemptoff",
441 .init = preemptoff_tracer_init, 457 .init = preemptoff_tracer_init,
442 .reset = irqsoff_tracer_reset, 458 .reset = irqsoff_tracer_reset,
459 .start = irqsoff_tracer_start,
460 .stop = irqsoff_tracer_stop,
443 .open = irqsoff_tracer_open, 461 .open = irqsoff_tracer_open,
444 .close = irqsoff_tracer_close, 462 .close = irqsoff_tracer_close,
445 .ctrl_update = irqsoff_tracer_ctrl_update,
446 .print_max = 1, 463 .print_max = 1,
447#ifdef CONFIG_FTRACE_SELFTEST 464#ifdef CONFIG_FTRACE_SELFTEST
448 .selftest = trace_selftest_startup_preemptoff, 465 .selftest = trace_selftest_startup_preemptoff,
@@ -456,11 +473,12 @@ static struct tracer preemptoff_tracer __read_mostly =
456#if defined(CONFIG_IRQSOFF_TRACER) && \ 473#if defined(CONFIG_IRQSOFF_TRACER) && \
457 defined(CONFIG_PREEMPT_TRACER) 474 defined(CONFIG_PREEMPT_TRACER)
458 475
459static void preemptirqsoff_tracer_init(struct trace_array *tr) 476static int preemptirqsoff_tracer_init(struct trace_array *tr)
460{ 477{
461 trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF; 478 trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;
462 479
463 __irqsoff_tracer_init(tr); 480 __irqsoff_tracer_init(tr);
481 return 0;
464} 482}
465 483
466static struct tracer preemptirqsoff_tracer __read_mostly = 484static struct tracer preemptirqsoff_tracer __read_mostly =
@@ -468,9 +486,10 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
468 .name = "preemptirqsoff", 486 .name = "preemptirqsoff",
469 .init = preemptirqsoff_tracer_init, 487 .init = preemptirqsoff_tracer_init,
470 .reset = irqsoff_tracer_reset, 488 .reset = irqsoff_tracer_reset,
489 .start = irqsoff_tracer_start,
490 .stop = irqsoff_tracer_stop,
471 .open = irqsoff_tracer_open, 491 .open = irqsoff_tracer_open,
472 .close = irqsoff_tracer_close, 492 .close = irqsoff_tracer_close,
473 .ctrl_update = irqsoff_tracer_ctrl_update,
474 .print_max = 1, 493 .print_max = 1,
475#ifdef CONFIG_FTRACE_SELFTEST 494#ifdef CONFIG_FTRACE_SELFTEST
476 .selftest = trace_selftest_startup_preemptirqsoff, 495 .selftest = trace_selftest_startup_preemptirqsoff,
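The irqsoff, preemptoff and preemptirqsoff hunks above all apply the same conversion: the tracer's init callback now returns an int so a failed setup can be reported to the core, and the removed ctrl_update hook is replaced by dedicated start and stop callbacks that pause and resume an existing trace. A minimal user-space sketch of that callback shape follows; the struct layout and every name in it are illustrative stand-ins, not the kernel's internal API.

#include <stdio.h>
#include <errno.h>

/* Illustrative stand-ins for the kernel types; not the real API. */
struct trace_array { int buffer_ready; };

struct tracer {
        const char *name;
        int  (*init)(struct trace_array *tr);  /* may now fail: returns -errno */
        void (*reset)(struct trace_array *tr);
        void (*start)(struct trace_array *tr); /* resume an existing trace */
        void (*stop)(struct trace_array *tr);  /* pause without tearing down */
};

static int demo_init(struct trace_array *tr)
{
        if (!tr->buffer_ready)
                return -ENOMEM;                /* callers must check this now */
        printf("demo: init ok\n");
        return 0;
}

static void demo_reset(struct trace_array *tr) { (void)tr; printf("demo: reset\n"); }
static void demo_start(struct trace_array *tr) { (void)tr; printf("demo: start\n"); }
static void demo_stop(struct trace_array *tr)  { (void)tr; printf("demo: stop\n"); }

static struct tracer demo_tracer = {
        .name  = "demo",
        .init  = demo_init,
        .reset = demo_reset,
        .start = demo_start,
        .stop  = demo_stop,
};

int main(void)
{
        struct trace_array tr = { .buffer_ready = 1 };

        if (demo_tracer.init(&tr)) {           /* the check the selftests add */
                fprintf(stderr, "init failed\n");
                return 1;
        }
        demo_tracer.stop(&tr);                 /* was ctrl_update with tr->ctrl = 0 */
        demo_tracer.start(&tr);                /* was ctrl_update with tr->ctrl = 1 */
        demo_tracer.reset(&tr);
        return 0;
}

The split matters because start/stop can be called repeatedly around reads of the trace file without re-running the (possibly failing) init path.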
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index e62cbf78eab6..fffcb069f1dc 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -22,44 +22,35 @@ static unsigned long prev_overruns;
22 22
23static void mmio_reset_data(struct trace_array *tr) 23static void mmio_reset_data(struct trace_array *tr)
24{ 24{
25 int cpu;
26
27 overrun_detected = false; 25 overrun_detected = false;
28 prev_overruns = 0; 26 prev_overruns = 0;
29 tr->time_start = ftrace_now(tr->cpu);
30 27
31 for_each_online_cpu(cpu) 28 tracing_reset_online_cpus(tr);
32 tracing_reset(tr, cpu);
33} 29}
34 30
35static void mmio_trace_init(struct trace_array *tr) 31static int mmio_trace_init(struct trace_array *tr)
36{ 32{
37 pr_debug("in %s\n", __func__); 33 pr_debug("in %s\n", __func__);
38 mmio_trace_array = tr; 34 mmio_trace_array = tr;
39 if (tr->ctrl) { 35
40 mmio_reset_data(tr); 36 mmio_reset_data(tr);
41 enable_mmiotrace(); 37 enable_mmiotrace();
42 } 38 return 0;
43} 39}
44 40
45static void mmio_trace_reset(struct trace_array *tr) 41static void mmio_trace_reset(struct trace_array *tr)
46{ 42{
47 pr_debug("in %s\n", __func__); 43 pr_debug("in %s\n", __func__);
48 if (tr->ctrl) 44
49 disable_mmiotrace(); 45 disable_mmiotrace();
50 mmio_reset_data(tr); 46 mmio_reset_data(tr);
51 mmio_trace_array = NULL; 47 mmio_trace_array = NULL;
52} 48}
53 49
54static void mmio_trace_ctrl_update(struct trace_array *tr) 50static void mmio_trace_start(struct trace_array *tr)
55{ 51{
56 pr_debug("in %s\n", __func__); 52 pr_debug("in %s\n", __func__);
57 if (tr->ctrl) { 53 mmio_reset_data(tr);
58 mmio_reset_data(tr);
59 enable_mmiotrace();
60 } else {
61 disable_mmiotrace();
62 }
63} 54}
64 55
65static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev) 56static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev)
@@ -296,10 +287,10 @@ static struct tracer mmio_tracer __read_mostly =
296 .name = "mmiotrace", 287 .name = "mmiotrace",
297 .init = mmio_trace_init, 288 .init = mmio_trace_init,
298 .reset = mmio_trace_reset, 289 .reset = mmio_trace_reset,
290 .start = mmio_trace_start,
299 .pipe_open = mmio_pipe_open, 291 .pipe_open = mmio_pipe_open,
300 .close = mmio_close, 292 .close = mmio_close,
301 .read = mmio_read, 293 .read = mmio_read,
302 .ctrl_update = mmio_trace_ctrl_update,
303 .print_line = mmio_print_line, 294 .print_line = mmio_print_line,
304}; 295};
305 296
@@ -371,5 +362,5 @@ void mmio_trace_mapping(struct mmiotrace_map *map)
371 362
372int mmio_trace_printk(const char *fmt, va_list args) 363int mmio_trace_printk(const char *fmt, va_list args)
373{ 364{
374 return trace_vprintk(0, fmt, args); 365 return trace_vprintk(0, -1, fmt, args);
375} 366}
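mmio_reset_data no longer open-codes the loop over online CPUs; that boilerplate moves into the shared tracing_reset_online_cpus() helper, which the sched_switch and sysprof tracers below adopt as well. A toy sketch of the consolidation, assuming a fixed array of hypothetical per-CPU buffers:

#include <stdio.h>

#define NR_CPUS 4

/* Hypothetical stand-ins for the per-CPU ring buffers. */
static int cpu_online[NR_CPUS]  = { 1, 1, 0, 1 };
static int buffer_used[NR_CPUS] = { 10, 7, 3, 9 };

static void tracing_reset(int cpu)
{
        buffer_used[cpu] = 0;
}

/* One shared helper instead of a private loop in every tracer's reset path. */
static void tracing_reset_online_cpus(void)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                if (cpu_online[cpu])
                        tracing_reset(cpu);
}

int main(void)
{
        tracing_reset_online_cpus();
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                printf("cpu%d: %d entries (%s)\n", cpu, buffer_used[cpu],
                       cpu_online[cpu] ? "online" : "offline");
        return 0;
}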
diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c
index 4592b4862515..b9767acd30ac 100644
--- a/kernel/trace/trace_nop.c
+++ b/kernel/trace/trace_nop.c
@@ -12,6 +12,27 @@
12 12
13#include "trace.h" 13#include "trace.h"
14 14
15/* Our two options */
16enum {
17 TRACE_NOP_OPT_ACCEPT = 0x1,
18 TRACE_NOP_OPT_REFUSE = 0x2
19};
20
21/* Options for the tracer (see trace_options file) */
22static struct tracer_opt nop_opts[] = {
23 /* Option that will be accepted by set_flag callback */
24 { TRACER_OPT(test_nop_accept, TRACE_NOP_OPT_ACCEPT) },
25 /* Option that will be refused by set_flag callback */
26 { TRACER_OPT(test_nop_refuse, TRACE_NOP_OPT_REFUSE) },
27 { } /* Always set a last empty entry */
28};
29
30static struct tracer_flags nop_flags = {
31 /* You can check your flags value here when you want. */
32 .val = 0, /* By default: all flags disabled */
33 .opts = nop_opts
34};
35
15static struct trace_array *ctx_trace; 36static struct trace_array *ctx_trace;
16 37
17static void start_nop_trace(struct trace_array *tr) 38static void start_nop_trace(struct trace_array *tr)
@@ -24,7 +45,7 @@ static void stop_nop_trace(struct trace_array *tr)
24 /* Nothing to do! */ 45 /* Nothing to do! */
25} 46}
26 47
27static void nop_trace_init(struct trace_array *tr) 48static int nop_trace_init(struct trace_array *tr)
28{ 49{
29 int cpu; 50 int cpu;
30 ctx_trace = tr; 51 ctx_trace = tr;
@@ -32,33 +53,53 @@ static void nop_trace_init(struct trace_array *tr)
32 for_each_online_cpu(cpu) 53 for_each_online_cpu(cpu)
33 tracing_reset(tr, cpu); 54 tracing_reset(tr, cpu);
34 55
35 if (tr->ctrl) 56 start_nop_trace(tr);
36 start_nop_trace(tr); 57 return 0;
37} 58}
38 59
39static void nop_trace_reset(struct trace_array *tr) 60static void nop_trace_reset(struct trace_array *tr)
40{ 61{
41 if (tr->ctrl) 62 stop_nop_trace(tr);
42 stop_nop_trace(tr);
43} 63}
44 64
45static void nop_trace_ctrl_update(struct trace_array *tr) 65/* It only serves as a signal handler and a callback to
 66 * accept or refuse the setting of a flag.
67 * If you don't implement it, then the flag setting will be
68 * automatically accepted.
69 */
70static int nop_set_flag(u32 old_flags, u32 bit, int set)
46{ 71{
47 /* When starting a new trace, reset the buffers */ 72 /*
48 if (tr->ctrl) 73 * Note that you don't need to update nop_flags.val yourself.
49 start_nop_trace(tr); 74 * The tracing API will do it automatically if you return 0
50 else 75 */
51 stop_nop_trace(tr); 76 if (bit == TRACE_NOP_OPT_ACCEPT) {
77 printk(KERN_DEBUG "nop_test_accept flag set to %d: we accept."
78 " Now cat trace_options to see the result\n",
79 set);
80 return 0;
81 }
82
83 if (bit == TRACE_NOP_OPT_REFUSE) {
84 printk(KERN_DEBUG "nop_test_refuse flag set to %d: we refuse."
85 "Now cat trace_options to see the result\n",
86 set);
87 return -EINVAL;
88 }
89
90 return 0;
52} 91}
53 92
93
54struct tracer nop_trace __read_mostly = 94struct tracer nop_trace __read_mostly =
55{ 95{
56 .name = "nop", 96 .name = "nop",
57 .init = nop_trace_init, 97 .init = nop_trace_init,
58 .reset = nop_trace_reset, 98 .reset = nop_trace_reset,
59 .ctrl_update = nop_trace_ctrl_update,
60#ifdef CONFIG_FTRACE_SELFTEST 99#ifdef CONFIG_FTRACE_SELFTEST
61 .selftest = trace_selftest_startup_nop, 100 .selftest = trace_selftest_startup_nop,
62#endif 101#endif
102 .flags = &nop_flags,
103 .set_flag = nop_set_flag
63}; 104};
64 105
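The nop tracer now doubles as a test bed for the new per-tracer options: a tracer_opt table names each flag bit, and the set_flag callback may accept a change (return 0, after which the core updates the flags value itself) or veto it (return -EINVAL). A compact user-space sketch of that accept/refuse contract, with made-up option names:

#include <stdio.h>
#include <string.h>
#include <errno.h>

enum {
        OPT_ACCEPT = 0x1,
        OPT_REFUSE = 0x2,
};

struct tracer_opt { const char *name; unsigned int bit; };

static struct tracer_opt opts[] = {
        { "test_accept", OPT_ACCEPT },
        { "test_refuse", OPT_REFUSE },
        { NULL, 0 },                     /* always end with an empty entry */
};

static unsigned int flags_val;           /* analogue of tracer_flags.val */

/* Return 0 to let the core flip the bit, -EINVAL to refuse it. */
static int set_flag(unsigned int bit, int set)
{
        (void)set;
        if (bit == OPT_REFUSE)
                return -EINVAL;
        return 0;
}

static int apply_option(const char *name, int set)
{
        for (struct tracer_opt *o = opts; o->name; o++) {
                if (strcmp(o->name, name))
                        continue;
                if (set_flag(o->bit, set))
                        return -EINVAL;
                if (set)                 /* the core, not the tracer, updates val */
                        flags_val |= o->bit;
                else
                        flags_val &= ~o->bit;
                return 0;
        }
        return -ENOENT;
}

int main(void)
{
        int ret;

        ret = apply_option("test_accept", 1);
        printf("accept: %d, flags=%#x\n", ret, flags_val);

        ret = apply_option("test_refuse", 1);
        printf("refuse: %d, flags=%#x\n", ret, flags_val);
        return 0;
}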
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
new file mode 100644
index 000000000000..a7172a352f62
--- /dev/null
+++ b/kernel/trace/trace_power.c
@@ -0,0 +1,179 @@
1/*
2 * ring buffer based C-state tracer
3 *
4 * Arjan van de Ven <arjan@linux.intel.com>
5 * Copyright (C) 2008 Intel Corporation
6 *
7 * Much is borrowed from trace_boot.c which is
8 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
9 *
10 */
11
12#include <linux/init.h>
13#include <linux/debugfs.h>
14#include <linux/ftrace.h>
15#include <linux/kallsyms.h>
16#include <linux/module.h>
17
18#include "trace.h"
19
20static struct trace_array *power_trace;
21static int __read_mostly trace_power_enabled;
22
23
24static void start_power_trace(struct trace_array *tr)
25{
26 trace_power_enabled = 1;
27}
28
29static void stop_power_trace(struct trace_array *tr)
30{
31 trace_power_enabled = 0;
32}
33
34
35static int power_trace_init(struct trace_array *tr)
36{
37 int cpu;
38 power_trace = tr;
39
40 trace_power_enabled = 1;
41
42 for_each_cpu_mask(cpu, cpu_possible_map)
43 tracing_reset(tr, cpu);
44 return 0;
45}
46
47static enum print_line_t power_print_line(struct trace_iterator *iter)
48{
49 int ret = 0;
50 struct trace_entry *entry = iter->ent;
 51 struct trace_power *field;
52 struct power_trace *it;
53 struct trace_seq *s = &iter->seq;
54 struct timespec stamp;
55 struct timespec duration;
56
57 trace_assign_type(field, entry);
58 it = &field->state_data;
59 stamp = ktime_to_timespec(it->stamp);
60 duration = ktime_to_timespec(ktime_sub(it->end, it->stamp));
61
62 if (entry->type == TRACE_POWER) {
63 if (it->type == POWER_CSTATE)
64 ret = trace_seq_printf(s, "[%5ld.%09ld] CSTATE: Going to C%i on cpu %i for %ld.%09ld\n",
65 stamp.tv_sec,
66 stamp.tv_nsec,
67 it->state, iter->cpu,
68 duration.tv_sec,
69 duration.tv_nsec);
70 if (it->type == POWER_PSTATE)
71 ret = trace_seq_printf(s, "[%5ld.%09ld] PSTATE: Going to P%i on cpu %i\n",
72 stamp.tv_sec,
73 stamp.tv_nsec,
74 it->state, iter->cpu);
75 if (!ret)
76 return TRACE_TYPE_PARTIAL_LINE;
77 return TRACE_TYPE_HANDLED;
78 }
79 return TRACE_TYPE_UNHANDLED;
80}
81
82static struct tracer power_tracer __read_mostly =
83{
84 .name = "power",
85 .init = power_trace_init,
86 .start = start_power_trace,
87 .stop = stop_power_trace,
88 .reset = stop_power_trace,
89 .print_line = power_print_line,
90};
91
92static int init_power_trace(void)
93{
94 return register_tracer(&power_tracer);
95}
96device_initcall(init_power_trace);
97
98void trace_power_start(struct power_trace *it, unsigned int type,
99 unsigned int level)
100{
101 if (!trace_power_enabled)
102 return;
103
104 memset(it, 0, sizeof(struct power_trace));
105 it->state = level;
106 it->type = type;
107 it->stamp = ktime_get();
108}
109EXPORT_SYMBOL_GPL(trace_power_start);
110
111
112void trace_power_end(struct power_trace *it)
113{
114 struct ring_buffer_event *event;
115 struct trace_power *entry;
116 struct trace_array_cpu *data;
117 unsigned long irq_flags;
118 struct trace_array *tr = power_trace;
119
120 if (!trace_power_enabled)
121 return;
122
123 preempt_disable();
124 it->end = ktime_get();
125 data = tr->data[smp_processor_id()];
126
127 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
128 &irq_flags);
129 if (!event)
130 goto out;
131 entry = ring_buffer_event_data(event);
132 tracing_generic_entry_update(&entry->ent, 0, 0);
133 entry->ent.type = TRACE_POWER;
134 entry->state_data = *it;
135 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
136
137 trace_wake_up();
138
139 out:
140 preempt_enable();
141}
142EXPORT_SYMBOL_GPL(trace_power_end);
143
144void trace_power_mark(struct power_trace *it, unsigned int type,
145 unsigned int level)
146{
147 struct ring_buffer_event *event;
148 struct trace_power *entry;
149 struct trace_array_cpu *data;
150 unsigned long irq_flags;
151 struct trace_array *tr = power_trace;
152
153 if (!trace_power_enabled)
154 return;
155
156 memset(it, 0, sizeof(struct power_trace));
157 it->state = level;
158 it->type = type;
159 it->stamp = ktime_get();
160 preempt_disable();
161 it->end = it->stamp;
162 data = tr->data[smp_processor_id()];
163
164 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
165 &irq_flags);
166 if (!event)
167 goto out;
168 entry = ring_buffer_event_data(event);
169 tracing_generic_entry_update(&entry->ent, 0, 0);
170 entry->ent.type = TRACE_POWER;
171 entry->state_data = *it;
172 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
173
174 trace_wake_up();
175
176 out:
177 preempt_enable();
178}
179EXPORT_SYMBOL_GPL(trace_power_mark);
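The new power tracer stamps an event when a C-state or P-state transition begins, stamps it again when it ends, and reports the difference as the time spent in that state. A rough user-space analogue of that start/end stamping, using clock_gettime where the kernel uses ktime_get; the struct and its fields are illustrative only:

#include <stdio.h>
#include <time.h>
#include <unistd.h>

struct power_event {
        struct timespec stamp;           /* set by the "start" hook */
        struct timespec end;             /* set by the "end" hook */
        int state;
};

static struct timespec ts_sub(struct timespec a, struct timespec b)
{
        struct timespec d = { .tv_sec = a.tv_sec - b.tv_sec,
                              .tv_nsec = a.tv_nsec - b.tv_nsec };

        if (d.tv_nsec < 0) {
                d.tv_sec--;
                d.tv_nsec += 1000000000L;
        }
        return d;
}

int main(void)
{
        struct power_event ev = { .state = 3 };
        struct timespec dur;

        clock_gettime(CLOCK_MONOTONIC, &ev.stamp);   /* trace_power_start() */
        usleep(2000);                                /* pretend time in C3 */
        clock_gettime(CLOCK_MONOTONIC, &ev.end);     /* trace_power_end() */

        dur = ts_sub(ev.end, ev.stamp);
        printf("[%5ld.%09ld] CSTATE: Going to C%d for %ld.%09ld\n",
               (long)ev.stamp.tv_sec, ev.stamp.tv_nsec,
               ev.state, (long)dur.tv_sec, dur.tv_nsec);
        return 0;
}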
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index b8f56beb1a62..df175cb4564f 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -16,7 +16,8 @@
16 16
17static struct trace_array *ctx_trace; 17static struct trace_array *ctx_trace;
18static int __read_mostly tracer_enabled; 18static int __read_mostly tracer_enabled;
19static atomic_t sched_ref; 19static int sched_ref;
20static DEFINE_MUTEX(sched_register_mutex);
20 21
21static void 22static void
22probe_sched_switch(struct rq *__rq, struct task_struct *prev, 23probe_sched_switch(struct rq *__rq, struct task_struct *prev,
@@ -27,7 +28,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
27 int cpu; 28 int cpu;
28 int pc; 29 int pc;
29 30
30 if (!atomic_read(&sched_ref)) 31 if (!sched_ref)
31 return; 32 return;
32 33
33 tracing_record_cmdline(prev); 34 tracing_record_cmdline(prev);
@@ -48,7 +49,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
48} 49}
49 50
50static void 51static void
51probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee) 52probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
52{ 53{
53 struct trace_array_cpu *data; 54 struct trace_array_cpu *data;
54 unsigned long flags; 55 unsigned long flags;
@@ -71,16 +72,6 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
71 local_irq_restore(flags); 72 local_irq_restore(flags);
72} 73}
73 74
74static void sched_switch_reset(struct trace_array *tr)
75{
76 int cpu;
77
78 tr->time_start = ftrace_now(tr->cpu);
79
80 for_each_online_cpu(cpu)
81 tracing_reset(tr, cpu);
82}
83
84static int tracing_sched_register(void) 75static int tracing_sched_register(void)
85{ 76{
86 int ret; 77 int ret;
@@ -123,20 +114,18 @@ static void tracing_sched_unregister(void)
123 114
124static void tracing_start_sched_switch(void) 115static void tracing_start_sched_switch(void)
125{ 116{
126 long ref; 117 mutex_lock(&sched_register_mutex);
127 118 if (!(sched_ref++))
128 ref = atomic_inc_return(&sched_ref);
129 if (ref == 1)
130 tracing_sched_register(); 119 tracing_sched_register();
120 mutex_unlock(&sched_register_mutex);
131} 121}
132 122
133static void tracing_stop_sched_switch(void) 123static void tracing_stop_sched_switch(void)
134{ 124{
135 long ref; 125 mutex_lock(&sched_register_mutex);
136 126 if (!(--sched_ref))
137 ref = atomic_dec_and_test(&sched_ref);
138 if (ref)
139 tracing_sched_unregister(); 127 tracing_sched_unregister();
128 mutex_unlock(&sched_register_mutex);
140} 129}
141 130
142void tracing_start_cmdline_record(void) 131void tracing_start_cmdline_record(void)
@@ -149,40 +138,86 @@ void tracing_stop_cmdline_record(void)
149 tracing_stop_sched_switch(); 138 tracing_stop_sched_switch();
150} 139}
151 140
141/**
142 * tracing_start_sched_switch_record - start tracing context switches
143 *
144 * Turns on context switch tracing for a tracer.
145 */
146void tracing_start_sched_switch_record(void)
147{
148 if (unlikely(!ctx_trace)) {
149 WARN_ON(1);
150 return;
151 }
152
153 tracing_start_sched_switch();
154
155 mutex_lock(&sched_register_mutex);
156 tracer_enabled++;
157 mutex_unlock(&sched_register_mutex);
158}
159
160/**
161 * tracing_stop_sched_switch_record - start tracing context switches
162 *
163 * Turns off context switch tracing for a tracer.
164 */
165void tracing_stop_sched_switch_record(void)
166{
167 mutex_lock(&sched_register_mutex);
168 tracer_enabled--;
169 WARN_ON(tracer_enabled < 0);
170 mutex_unlock(&sched_register_mutex);
171
172 tracing_stop_sched_switch();
173}
174
175/**
176 * tracing_sched_switch_assign_trace - assign a trace array for ctx switch
177 * @tr: trace array pointer to assign
178 *
179 * Some tracers might want to record the context switches in their
180 * trace. This function lets those tracers assign the trace array
181 * to use.
182 */
183void tracing_sched_switch_assign_trace(struct trace_array *tr)
184{
185 ctx_trace = tr;
186}
187
152static void start_sched_trace(struct trace_array *tr) 188static void start_sched_trace(struct trace_array *tr)
153{ 189{
154 sched_switch_reset(tr); 190 tracing_reset_online_cpus(tr);
155 tracing_start_cmdline_record(); 191 tracing_start_sched_switch_record();
156 tracer_enabled = 1;
157} 192}
158 193
159static void stop_sched_trace(struct trace_array *tr) 194static void stop_sched_trace(struct trace_array *tr)
160{ 195{
161 tracer_enabled = 0; 196 tracing_stop_sched_switch_record();
162 tracing_stop_cmdline_record();
163} 197}
164 198
165static void sched_switch_trace_init(struct trace_array *tr) 199static int sched_switch_trace_init(struct trace_array *tr)
166{ 200{
167 ctx_trace = tr; 201 ctx_trace = tr;
168 202 start_sched_trace(tr);
169 if (tr->ctrl) 203 return 0;
170 start_sched_trace(tr);
171} 204}
172 205
173static void sched_switch_trace_reset(struct trace_array *tr) 206static void sched_switch_trace_reset(struct trace_array *tr)
174{ 207{
175 if (tr->ctrl) 208 if (sched_ref)
176 stop_sched_trace(tr); 209 stop_sched_trace(tr);
177} 210}
178 211
179static void sched_switch_trace_ctrl_update(struct trace_array *tr) 212static void sched_switch_trace_start(struct trace_array *tr)
180{ 213{
181 /* When starting a new trace, reset the buffers */ 214 tracing_reset_online_cpus(tr);
182 if (tr->ctrl) 215 tracing_start_sched_switch();
183 start_sched_trace(tr); 216}
184 else 217
185 stop_sched_trace(tr); 218static void sched_switch_trace_stop(struct trace_array *tr)
219{
220 tracing_stop_sched_switch();
186} 221}
187 222
188static struct tracer sched_switch_trace __read_mostly = 223static struct tracer sched_switch_trace __read_mostly =
@@ -190,7 +225,8 @@ static struct tracer sched_switch_trace __read_mostly =
190 .name = "sched_switch", 225 .name = "sched_switch",
191 .init = sched_switch_trace_init, 226 .init = sched_switch_trace_init,
192 .reset = sched_switch_trace_reset, 227 .reset = sched_switch_trace_reset,
193 .ctrl_update = sched_switch_trace_ctrl_update, 228 .start = sched_switch_trace_start,
229 .stop = sched_switch_trace_stop,
194#ifdef CONFIG_FTRACE_SELFTEST 230#ifdef CONFIG_FTRACE_SELFTEST
195 .selftest = trace_selftest_startup_sched_switch, 231 .selftest = trace_selftest_startup_sched_switch,
196#endif 232#endif
@@ -198,14 +234,7 @@ static struct tracer sched_switch_trace __read_mostly =
198 234
199__init static int init_sched_switch_trace(void) 235__init static int init_sched_switch_trace(void)
200{ 236{
201 int ret = 0;
202
203 if (atomic_read(&sched_ref))
204 ret = tracing_sched_register();
205 if (ret) {
206 pr_info("error registering scheduler trace\n");
207 return ret;
208 }
209 return register_tracer(&sched_switch_trace); 237 return register_tracer(&sched_switch_trace);
210} 238}
211device_initcall(init_sched_switch_trace); 239device_initcall(init_sched_switch_trace);
240
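The sched_switch conversion drops the atomic_t reference count in favour of a plain integer guarded by sched_register_mutex: the first user registers the tracepoint probes, the last one unregisters them. A self-contained sketch of that register-on-first-user pattern, using a pthread mutex in place of the kernel mutex and stubbed probe registration:

#include <stdio.h>
#include <pthread.h>

static int sched_ref;
static pthread_mutex_t sched_register_mutex = PTHREAD_MUTEX_INITIALIZER;

static void tracing_sched_register(void)   { printf("probes registered\n"); }
static void tracing_sched_unregister(void) { printf("probes unregistered\n"); }

static void tracing_start_sched_switch(void)
{
        pthread_mutex_lock(&sched_register_mutex);
        if (!(sched_ref++))             /* only the first user registers */
                tracing_sched_register();
        pthread_mutex_unlock(&sched_register_mutex);
}

static void tracing_stop_sched_switch(void)
{
        pthread_mutex_lock(&sched_register_mutex);
        if (!(--sched_ref))             /* only the last user unregisters */
                tracing_sched_unregister();
        pthread_mutex_unlock(&sched_register_mutex);
}

int main(void)
{
        tracing_start_sched_switch();   /* e.g. cmdline recording */
        tracing_start_sched_switch();   /* e.g. the sched_switch tracer */
        tracing_stop_sched_switch();
        tracing_stop_sched_switch();
        return 0;
}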
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 3ae93f16b565..43586b689e31 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -50,8 +50,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
50 return; 50 return;
51 51
52 pc = preempt_count(); 52 pc = preempt_count();
53 resched = need_resched(); 53 resched = ftrace_preempt_disable();
54 preempt_disable_notrace();
55 54
56 cpu = raw_smp_processor_id(); 55 cpu = raw_smp_processor_id();
57 data = tr->data[cpu]; 56 data = tr->data[cpu];
@@ -81,15 +80,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
81 out: 80 out:
82 atomic_dec(&data->disabled); 81 atomic_dec(&data->disabled);
83 82
84 /* 83 ftrace_preempt_enable(resched);
85 * To prevent recursion from the scheduler, if the
86 * resched flag was set before we entered, then
87 * don't reschedule.
88 */
89 if (resched)
90 preempt_enable_no_resched_notrace();
91 else
92 preempt_enable_notrace();
93} 84}
94 85
95static struct ftrace_ops trace_ops __read_mostly = 86static struct ftrace_ops trace_ops __read_mostly =
@@ -220,7 +211,7 @@ static void wakeup_reset(struct trace_array *tr)
220} 211}
221 212
222static void 213static void
223probe_wakeup(struct rq *rq, struct task_struct *p) 214probe_wakeup(struct rq *rq, struct task_struct *p, int success)
224{ 215{
225 int cpu = smp_processor_id(); 216 int cpu = smp_processor_id();
226 unsigned long flags; 217 unsigned long flags;
@@ -271,6 +262,12 @@ out:
271 atomic_dec(&wakeup_trace->data[cpu]->disabled); 262 atomic_dec(&wakeup_trace->data[cpu]->disabled);
272} 263}
273 264
265/*
266 * save_tracer_enabled is used to save the state of the tracer_enabled
267 * variable when we disable it when we open a trace output file.
268 */
269static int save_tracer_enabled;
270
274static void start_wakeup_tracer(struct trace_array *tr) 271static void start_wakeup_tracer(struct trace_array *tr)
275{ 272{
276 int ret; 273 int ret;
@@ -309,7 +306,13 @@ static void start_wakeup_tracer(struct trace_array *tr)
309 306
310 register_ftrace_function(&trace_ops); 307 register_ftrace_function(&trace_ops);
311 308
312 tracer_enabled = 1; 309 if (tracing_is_enabled()) {
310 tracer_enabled = 1;
311 save_tracer_enabled = 1;
312 } else {
313 tracer_enabled = 0;
314 save_tracer_enabled = 0;
315 }
313 316
314 return; 317 return;
315fail_deprobe_wake_new: 318fail_deprobe_wake_new:
@@ -321,49 +324,53 @@ fail_deprobe:
321static void stop_wakeup_tracer(struct trace_array *tr) 324static void stop_wakeup_tracer(struct trace_array *tr)
322{ 325{
323 tracer_enabled = 0; 326 tracer_enabled = 0;
327 save_tracer_enabled = 0;
324 unregister_ftrace_function(&trace_ops); 328 unregister_ftrace_function(&trace_ops);
325 unregister_trace_sched_switch(probe_wakeup_sched_switch); 329 unregister_trace_sched_switch(probe_wakeup_sched_switch);
326 unregister_trace_sched_wakeup_new(probe_wakeup); 330 unregister_trace_sched_wakeup_new(probe_wakeup);
327 unregister_trace_sched_wakeup(probe_wakeup); 331 unregister_trace_sched_wakeup(probe_wakeup);
328} 332}
329 333
330static void wakeup_tracer_init(struct trace_array *tr) 334static int wakeup_tracer_init(struct trace_array *tr)
331{ 335{
332 wakeup_trace = tr; 336 wakeup_trace = tr;
333 337 start_wakeup_tracer(tr);
334 if (tr->ctrl) 338 return 0;
335 start_wakeup_tracer(tr);
336} 339}
337 340
338static void wakeup_tracer_reset(struct trace_array *tr) 341static void wakeup_tracer_reset(struct trace_array *tr)
339{ 342{
340 if (tr->ctrl) { 343 stop_wakeup_tracer(tr);
341 stop_wakeup_tracer(tr); 344 /* make sure we put back any tasks we are tracing */
342 /* make sure we put back any tasks we are tracing */ 345 wakeup_reset(tr);
343 wakeup_reset(tr); 346}
344 } 347
348static void wakeup_tracer_start(struct trace_array *tr)
349{
350 wakeup_reset(tr);
351 tracer_enabled = 1;
352 save_tracer_enabled = 1;
345} 353}
346 354
347static void wakeup_tracer_ctrl_update(struct trace_array *tr) 355static void wakeup_tracer_stop(struct trace_array *tr)
348{ 356{
349 if (tr->ctrl) 357 tracer_enabled = 0;
350 start_wakeup_tracer(tr); 358 save_tracer_enabled = 0;
351 else
352 stop_wakeup_tracer(tr);
353} 359}
354 360
355static void wakeup_tracer_open(struct trace_iterator *iter) 361static void wakeup_tracer_open(struct trace_iterator *iter)
356{ 362{
357 /* stop the trace while dumping */ 363 /* stop the trace while dumping */
358 if (iter->tr->ctrl) 364 tracer_enabled = 0;
359 stop_wakeup_tracer(iter->tr);
360} 365}
361 366
362static void wakeup_tracer_close(struct trace_iterator *iter) 367static void wakeup_tracer_close(struct trace_iterator *iter)
363{ 368{
364 /* forget about any processes we were recording */ 369 /* forget about any processes we were recording */
365 if (iter->tr->ctrl) 370 if (save_tracer_enabled) {
366 start_wakeup_tracer(iter->tr); 371 wakeup_reset(iter->tr);
372 tracer_enabled = 1;
373 }
367} 374}
368 375
369static struct tracer wakeup_tracer __read_mostly = 376static struct tracer wakeup_tracer __read_mostly =
@@ -371,9 +378,10 @@ static struct tracer wakeup_tracer __read_mostly =
371 .name = "wakeup", 378 .name = "wakeup",
372 .init = wakeup_tracer_init, 379 .init = wakeup_tracer_init,
373 .reset = wakeup_tracer_reset, 380 .reset = wakeup_tracer_reset,
381 .start = wakeup_tracer_start,
382 .stop = wakeup_tracer_stop,
374 .open = wakeup_tracer_open, 383 .open = wakeup_tracer_open,
375 .close = wakeup_tracer_close, 384 .close = wakeup_tracer_close,
376 .ctrl_update = wakeup_tracer_ctrl_update,
377 .print_max = 1, 385 .print_max = 1,
378#ifdef CONFIG_FTRACE_SELFTEST 386#ifdef CONFIG_FTRACE_SELFTEST
379 .selftest = trace_selftest_startup_wakeup, 387 .selftest = trace_selftest_startup_wakeup,
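wakeup_tracer_open now simply clears tracer_enabled while a trace file is being read, and wakeup_tracer_close restores it only if save_tracer_enabled says tracing was live beforehand. A tiny sketch of that save-and-restore-around-dump idea; the two variable names mirror the diff, the rest is illustrative:

#include <stdio.h>

static int tracer_enabled = 1;
static int save_tracer_enabled = 1;

static void wakeup_reset(void) { printf("buffers reset\n"); }

static void tracer_open(void)
{
        tracer_enabled = 0;             /* stop the trace while dumping */
}

static void tracer_close(void)
{
        if (save_tracer_enabled) {      /* resume only if it was on before */
                wakeup_reset();
                tracer_enabled = 1;
        }
}

int main(void)
{
        tracer_open();
        printf("while open:  enabled=%d\n", tracer_enabled);
        tracer_close();
        printf("after close: enabled=%d\n", tracer_enabled);
        return 0;
}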
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 90bc752a7580..88c8eb70f54a 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -13,6 +13,7 @@ static inline int trace_valid_entry(struct trace_entry *entry)
13 case TRACE_STACK: 13 case TRACE_STACK:
14 case TRACE_PRINT: 14 case TRACE_PRINT:
15 case TRACE_SPECIAL: 15 case TRACE_SPECIAL:
16 case TRACE_BRANCH:
16 return 1; 17 return 1;
17 } 18 }
18 return 0; 19 return 0;
@@ -51,7 +52,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
51 int cpu, ret = 0; 52 int cpu, ret = 0;
52 53
53 /* Don't allow flipping of max traces now */ 54 /* Don't allow flipping of max traces now */
54 raw_local_irq_save(flags); 55 local_irq_save(flags);
55 __raw_spin_lock(&ftrace_max_lock); 56 __raw_spin_lock(&ftrace_max_lock);
56 57
57 cnt = ring_buffer_entries(tr->buffer); 58 cnt = ring_buffer_entries(tr->buffer);
@@ -62,7 +63,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
62 break; 63 break;
63 } 64 }
64 __raw_spin_unlock(&ftrace_max_lock); 65 __raw_spin_unlock(&ftrace_max_lock);
65 raw_local_irq_restore(flags); 66 local_irq_restore(flags);
66 67
67 if (count) 68 if (count)
68 *count = cnt; 69 *count = cnt;
@@ -70,6 +71,11 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
70 return ret; 71 return ret;
71} 72}
72 73
74static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
75{
76 printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
77 trace->name, init_ret);
78}
73#ifdef CONFIG_FUNCTION_TRACER 79#ifdef CONFIG_FUNCTION_TRACER
74 80
75#ifdef CONFIG_DYNAMIC_FTRACE 81#ifdef CONFIG_DYNAMIC_FTRACE
@@ -110,8 +116,11 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
110 ftrace_set_filter(func_name, strlen(func_name), 1); 116 ftrace_set_filter(func_name, strlen(func_name), 1);
111 117
112 /* enable tracing */ 118 /* enable tracing */
113 tr->ctrl = 1; 119 ret = trace->init(tr);
114 trace->init(tr); 120 if (ret) {
121 warn_failed_init_tracer(trace, ret);
122 goto out;
123 }
115 124
116 /* Sleep for a 1/10 of a second */ 125 /* Sleep for a 1/10 of a second */
117 msleep(100); 126 msleep(100);
@@ -134,13 +143,13 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
134 msleep(100); 143 msleep(100);
135 144
136 /* stop the tracing. */ 145 /* stop the tracing. */
137 tr->ctrl = 0; 146 tracing_stop();
138 trace->ctrl_update(tr);
139 ftrace_enabled = 0; 147 ftrace_enabled = 0;
140 148
141 /* check the trace buffer */ 149 /* check the trace buffer */
142 ret = trace_test_buffer(tr, &count); 150 ret = trace_test_buffer(tr, &count);
143 trace->reset(tr); 151 trace->reset(tr);
152 tracing_start();
144 153
145 /* we should only have one item */ 154 /* we should only have one item */
146 if (!ret && count != 1) { 155 if (!ret && count != 1) {
@@ -148,6 +157,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
148 ret = -1; 157 ret = -1;
149 goto out; 158 goto out;
150 } 159 }
160
151 out: 161 out:
152 ftrace_enabled = save_ftrace_enabled; 162 ftrace_enabled = save_ftrace_enabled;
153 tracer_enabled = save_tracer_enabled; 163 tracer_enabled = save_tracer_enabled;
@@ -180,18 +190,22 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
180 ftrace_enabled = 1; 190 ftrace_enabled = 1;
181 tracer_enabled = 1; 191 tracer_enabled = 1;
182 192
183 tr->ctrl = 1; 193 ret = trace->init(tr);
184 trace->init(tr); 194 if (ret) {
195 warn_failed_init_tracer(trace, ret);
196 goto out;
197 }
198
185 /* Sleep for a 1/10 of a second */ 199 /* Sleep for a 1/10 of a second */
186 msleep(100); 200 msleep(100);
187 /* stop the tracing. */ 201 /* stop the tracing. */
188 tr->ctrl = 0; 202 tracing_stop();
189 trace->ctrl_update(tr);
190 ftrace_enabled = 0; 203 ftrace_enabled = 0;
191 204
192 /* check the trace buffer */ 205 /* check the trace buffer */
193 ret = trace_test_buffer(tr, &count); 206 ret = trace_test_buffer(tr, &count);
194 trace->reset(tr); 207 trace->reset(tr);
208 tracing_start();
195 209
196 if (!ret && !count) { 210 if (!ret && !count) {
197 printk(KERN_CONT ".. no entries found .."); 211 printk(KERN_CONT ".. no entries found ..");
@@ -223,8 +237,12 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
223 int ret; 237 int ret;
224 238
225 /* start the tracing */ 239 /* start the tracing */
226 tr->ctrl = 1; 240 ret = trace->init(tr);
227 trace->init(tr); 241 if (ret) {
242 warn_failed_init_tracer(trace, ret);
243 return ret;
244 }
245
228 /* reset the max latency */ 246 /* reset the max latency */
229 tracing_max_latency = 0; 247 tracing_max_latency = 0;
230 /* disable interrupts for a bit */ 248 /* disable interrupts for a bit */
@@ -232,13 +250,13 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
232 udelay(100); 250 udelay(100);
233 local_irq_enable(); 251 local_irq_enable();
234 /* stop the tracing. */ 252 /* stop the tracing. */
235 tr->ctrl = 0; 253 tracing_stop();
236 trace->ctrl_update(tr);
237 /* check both trace buffers */ 254 /* check both trace buffers */
238 ret = trace_test_buffer(tr, NULL); 255 ret = trace_test_buffer(tr, NULL);
239 if (!ret) 256 if (!ret)
240 ret = trace_test_buffer(&max_tr, &count); 257 ret = trace_test_buffer(&max_tr, &count);
241 trace->reset(tr); 258 trace->reset(tr);
259 tracing_start();
242 260
243 if (!ret && !count) { 261 if (!ret && !count) {
244 printk(KERN_CONT ".. no entries found .."); 262 printk(KERN_CONT ".. no entries found ..");
@@ -259,9 +277,26 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
259 unsigned long count; 277 unsigned long count;
260 int ret; 278 int ret;
261 279
280 /*
281 * Now that the big kernel lock is no longer preemptable,
282 * and this is called with the BKL held, it will always
283 * fail. If preemption is already disabled, simply
284 * pass the test. When the BKL is removed, or becomes
285 * preemptible again, we will once again test this,
286 * so keep it in.
287 */
288 if (preempt_count()) {
289 printk(KERN_CONT "can not test ... force ");
290 return 0;
291 }
292
262 /* start the tracing */ 293 /* start the tracing */
263 tr->ctrl = 1; 294 ret = trace->init(tr);
264 trace->init(tr); 295 if (ret) {
296 warn_failed_init_tracer(trace, ret);
297 return ret;
298 }
299
265 /* reset the max latency */ 300 /* reset the max latency */
266 tracing_max_latency = 0; 301 tracing_max_latency = 0;
267 /* disable preemption for a bit */ 302 /* disable preemption for a bit */
@@ -269,13 +304,13 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
269 udelay(100); 304 udelay(100);
270 preempt_enable(); 305 preempt_enable();
271 /* stop the tracing. */ 306 /* stop the tracing. */
272 tr->ctrl = 0; 307 tracing_stop();
273 trace->ctrl_update(tr);
274 /* check both trace buffers */ 308 /* check both trace buffers */
275 ret = trace_test_buffer(tr, NULL); 309 ret = trace_test_buffer(tr, NULL);
276 if (!ret) 310 if (!ret)
277 ret = trace_test_buffer(&max_tr, &count); 311 ret = trace_test_buffer(&max_tr, &count);
278 trace->reset(tr); 312 trace->reset(tr);
313 tracing_start();
279 314
280 if (!ret && !count) { 315 if (!ret && !count) {
281 printk(KERN_CONT ".. no entries found .."); 316 printk(KERN_CONT ".. no entries found ..");
@@ -296,9 +331,25 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
296 unsigned long count; 331 unsigned long count;
297 int ret; 332 int ret;
298 333
334 /*
335 * Now that the big kernel lock is no longer preemptable,
336 * and this is called with the BKL held, it will always
337 * fail. If preemption is already disabled, simply
338 * pass the test. When the BKL is removed, or becomes
339 * preemptible again, we will once again test this,
340 * so keep it in.
341 */
342 if (preempt_count()) {
343 printk(KERN_CONT "can not test ... force ");
344 return 0;
345 }
346
299 /* start the tracing */ 347 /* start the tracing */
300 tr->ctrl = 1; 348 ret = trace->init(tr);
301 trace->init(tr); 349 if (ret) {
350 warn_failed_init_tracer(trace, ret);
351 goto out;
352 }
302 353
303 /* reset the max latency */ 354 /* reset the max latency */
304 tracing_max_latency = 0; 355 tracing_max_latency = 0;
@@ -312,27 +363,30 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
312 local_irq_enable(); 363 local_irq_enable();
313 364
314 /* stop the tracing. */ 365 /* stop the tracing. */
315 tr->ctrl = 0; 366 tracing_stop();
316 trace->ctrl_update(tr);
317 /* check both trace buffers */ 367 /* check both trace buffers */
318 ret = trace_test_buffer(tr, NULL); 368 ret = trace_test_buffer(tr, NULL);
319 if (ret) 369 if (ret) {
370 tracing_start();
320 goto out; 371 goto out;
372 }
321 373
322 ret = trace_test_buffer(&max_tr, &count); 374 ret = trace_test_buffer(&max_tr, &count);
323 if (ret) 375 if (ret) {
376 tracing_start();
324 goto out; 377 goto out;
378 }
325 379
326 if (!ret && !count) { 380 if (!ret && !count) {
327 printk(KERN_CONT ".. no entries found .."); 381 printk(KERN_CONT ".. no entries found ..");
328 ret = -1; 382 ret = -1;
383 tracing_start();
329 goto out; 384 goto out;
330 } 385 }
331 386
332 /* do the test by disabling interrupts first this time */ 387 /* do the test by disabling interrupts first this time */
333 tracing_max_latency = 0; 388 tracing_max_latency = 0;
334 tr->ctrl = 1; 389 tracing_start();
335 trace->ctrl_update(tr);
336 preempt_disable(); 390 preempt_disable();
337 local_irq_disable(); 391 local_irq_disable();
338 udelay(100); 392 udelay(100);
@@ -341,8 +395,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
341 local_irq_enable(); 395 local_irq_enable();
342 396
343 /* stop the tracing. */ 397 /* stop the tracing. */
344 tr->ctrl = 0; 398 tracing_stop();
345 trace->ctrl_update(tr);
346 /* check both trace buffers */ 399 /* check both trace buffers */
347 ret = trace_test_buffer(tr, NULL); 400 ret = trace_test_buffer(tr, NULL);
348 if (ret) 401 if (ret)
@@ -358,6 +411,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
358 411
359 out: 412 out:
360 trace->reset(tr); 413 trace->reset(tr);
414 tracing_start();
361 tracing_max_latency = save_max; 415 tracing_max_latency = save_max;
362 416
363 return ret; 417 return ret;
@@ -423,8 +477,12 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
423 wait_for_completion(&isrt); 477 wait_for_completion(&isrt);
424 478
425 /* start the tracing */ 479 /* start the tracing */
426 tr->ctrl = 1; 480 ret = trace->init(tr);
427 trace->init(tr); 481 if (ret) {
482 warn_failed_init_tracer(trace, ret);
483 return ret;
484 }
485
428 /* reset the max latency */ 486 /* reset the max latency */
429 tracing_max_latency = 0; 487 tracing_max_latency = 0;
430 488
@@ -448,8 +506,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
448 msleep(100); 506 msleep(100);
449 507
450 /* stop the tracing. */ 508 /* stop the tracing. */
451 tr->ctrl = 0; 509 tracing_stop();
452 trace->ctrl_update(tr);
453 /* check both trace buffers */ 510 /* check both trace buffers */
454 ret = trace_test_buffer(tr, NULL); 511 ret = trace_test_buffer(tr, NULL);
455 if (!ret) 512 if (!ret)
@@ -457,6 +514,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
457 514
458 515
459 trace->reset(tr); 516 trace->reset(tr);
517 tracing_start();
460 518
461 tracing_max_latency = save_max; 519 tracing_max_latency = save_max;
462 520
@@ -480,16 +538,20 @@ trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr
480 int ret; 538 int ret;
481 539
482 /* start the tracing */ 540 /* start the tracing */
483 tr->ctrl = 1; 541 ret = trace->init(tr);
484 trace->init(tr); 542 if (ret) {
543 warn_failed_init_tracer(trace, ret);
544 return ret;
545 }
546
485 /* Sleep for a 1/10 of a second */ 547 /* Sleep for a 1/10 of a second */
486 msleep(100); 548 msleep(100);
487 /* stop the tracing. */ 549 /* stop the tracing. */
488 tr->ctrl = 0; 550 tracing_stop();
489 trace->ctrl_update(tr);
490 /* check the trace buffer */ 551 /* check the trace buffer */
491 ret = trace_test_buffer(tr, &count); 552 ret = trace_test_buffer(tr, &count);
492 trace->reset(tr); 553 trace->reset(tr);
554 tracing_start();
493 555
494 if (!ret && !count) { 556 if (!ret && !count) {
495 printk(KERN_CONT ".. no entries found .."); 557 printk(KERN_CONT ".. no entries found ..");
@@ -508,17 +570,48 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
508 int ret; 570 int ret;
509 571
510 /* start the tracing */ 572 /* start the tracing */
511 tr->ctrl = 1; 573 ret = trace->init(tr);
512 trace->init(tr); 574 if (ret) {
575 warn_failed_init_tracer(trace, ret);
576 return 0;
577 }
578
513 /* Sleep for a 1/10 of a second */ 579 /* Sleep for a 1/10 of a second */
514 msleep(100); 580 msleep(100);
515 /* stop the tracing. */ 581 /* stop the tracing. */
516 tr->ctrl = 0; 582 tracing_stop();
517 trace->ctrl_update(tr);
518 /* check the trace buffer */ 583 /* check the trace buffer */
519 ret = trace_test_buffer(tr, &count); 584 ret = trace_test_buffer(tr, &count);
520 trace->reset(tr); 585 trace->reset(tr);
586 tracing_start();
521 587
522 return ret; 588 return ret;
523} 589}
524#endif /* CONFIG_SYSPROF_TRACER */ 590#endif /* CONFIG_SYSPROF_TRACER */
591
592#ifdef CONFIG_BRANCH_TRACER
593int
594trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
595{
596 unsigned long count;
597 int ret;
598
599 /* start the tracing */
600 ret = trace->init(tr);
601 if (ret) {
602 warn_failed_init_tracer(trace, ret);
603 return ret;
604 }
605
606 /* Sleep for a 1/10 of a second */
607 msleep(100);
608 /* stop the tracing. */
609 tracing_stop();
610 /* check the trace buffer */
611 ret = trace_test_buffer(tr, &count);
612 trace->reset(tr);
613 tracing_start();
614
615 return ret;
616}
617#endif /* CONFIG_BRANCH_TRACER */
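Every startup selftest above is converted to the same skeleton: call the tracer's init and warn if it fails, run a small workload, stop global tracing, count buffer entries, reset the tracer, then restart tracing so later tests see a live system. A user-space sketch of that harness shape, with all helpers stubbed out and every name illustrative:

#include <stdio.h>

struct tracer { const char *name; int (*init)(void); void (*reset)(void); };

static void tracing_stop(void)  { printf("tracing stopped\n"); }
static void tracing_start(void) { printf("tracing restarted\n"); }
static long trace_test_buffer(void) { return 42; /* pretend entry count */ }

static void warn_failed_init_tracer(struct tracer *t, int ret)
{
        fprintf(stderr, "Failed to init %s tracer, init returned %d\n",
                t->name, ret);
}

static int selftest(struct tracer *t)
{
        long count;
        int ret = t->init();

        if (ret) {
                warn_failed_init_tracer(t, ret);
                return ret;
        }
        /* ... run some workload here ... */
        tracing_stop();
        count = trace_test_buffer();
        t->reset();
        tracing_start();
        return count ? 0 : -1;          /* fail if no entries were recorded */
}

static int demo_init(void)   { return 0; }
static void demo_reset(void) { printf("demo reset\n"); }

int main(void)
{
        struct tracer demo = { "demo", demo_init, demo_reset };

        return selftest(&demo) ? 1 : 0;
}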
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 3bdb44bde4b7..d0871bc0aca5 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -10,6 +10,7 @@
10#include <linux/debugfs.h> 10#include <linux/debugfs.h>
11#include <linux/ftrace.h> 11#include <linux/ftrace.h>
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/sysctl.h>
13#include <linux/init.h> 14#include <linux/init.h>
14#include <linux/fs.h> 15#include <linux/fs.h>
15#include "trace.h" 16#include "trace.h"
@@ -31,6 +32,10 @@ static raw_spinlock_t max_stack_lock =
31 32
32static int stack_trace_disabled __read_mostly; 33static int stack_trace_disabled __read_mostly;
33static DEFINE_PER_CPU(int, trace_active); 34static DEFINE_PER_CPU(int, trace_active);
35static DEFINE_MUTEX(stack_sysctl_mutex);
36
37int stack_tracer_enabled;
38static int last_stack_tracer_enabled;
34 39
35static inline void check_stack(void) 40static inline void check_stack(void)
36{ 41{
@@ -48,7 +53,7 @@ static inline void check_stack(void)
48 if (!object_is_on_stack(&this_size)) 53 if (!object_is_on_stack(&this_size))
49 return; 54 return;
50 55
51 raw_local_irq_save(flags); 56 local_irq_save(flags);
52 __raw_spin_lock(&max_stack_lock); 57 __raw_spin_lock(&max_stack_lock);
53 58
54 /* a race could have already updated it */ 59 /* a race could have already updated it */
@@ -78,6 +83,7 @@ static inline void check_stack(void)
78 * on a new max, so it is far from a fast path. 83 * on a new max, so it is far from a fast path.
79 */ 84 */
80 while (i < max_stack_trace.nr_entries) { 85 while (i < max_stack_trace.nr_entries) {
86 int found = 0;
81 87
82 stack_dump_index[i] = this_size; 88 stack_dump_index[i] = this_size;
83 p = start; 89 p = start;
@@ -86,17 +92,19 @@ static inline void check_stack(void)
86 if (*p == stack_dump_trace[i]) { 92 if (*p == stack_dump_trace[i]) {
87 this_size = stack_dump_index[i++] = 93 this_size = stack_dump_index[i++] =
88 (top - p) * sizeof(unsigned long); 94 (top - p) * sizeof(unsigned long);
95 found = 1;
89 /* Start the search from here */ 96 /* Start the search from here */
90 start = p + 1; 97 start = p + 1;
91 } 98 }
92 } 99 }
93 100
94 i++; 101 if (!found)
102 i++;
95 } 103 }
96 104
97 out: 105 out:
98 __raw_spin_unlock(&max_stack_lock); 106 __raw_spin_unlock(&max_stack_lock);
99 raw_local_irq_restore(flags); 107 local_irq_restore(flags);
100} 108}
101 109
102static void 110static void
@@ -107,8 +115,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
107 if (unlikely(!ftrace_enabled || stack_trace_disabled)) 115 if (unlikely(!ftrace_enabled || stack_trace_disabled))
108 return; 116 return;
109 117
110 resched = need_resched(); 118 resched = ftrace_preempt_disable();
111 preempt_disable_notrace();
112 119
113 cpu = raw_smp_processor_id(); 120 cpu = raw_smp_processor_id();
114 /* no atomic needed, we only modify this variable by this cpu */ 121 /* no atomic needed, we only modify this variable by this cpu */
@@ -120,10 +127,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
120 out: 127 out:
121 per_cpu(trace_active, cpu)--; 128 per_cpu(trace_active, cpu)--;
122 /* prevent recursion in schedule */ 129 /* prevent recursion in schedule */
123 if (resched) 130 ftrace_preempt_enable(resched);
124 preempt_enable_no_resched_notrace();
125 else
126 preempt_enable_notrace();
127} 131}
128 132
129static struct ftrace_ops trace_ops __read_mostly = 133static struct ftrace_ops trace_ops __read_mostly =
@@ -166,16 +170,16 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
166 if (ret < 0) 170 if (ret < 0)
167 return ret; 171 return ret;
168 172
169 raw_local_irq_save(flags); 173 local_irq_save(flags);
170 __raw_spin_lock(&max_stack_lock); 174 __raw_spin_lock(&max_stack_lock);
171 *ptr = val; 175 *ptr = val;
172 __raw_spin_unlock(&max_stack_lock); 176 __raw_spin_unlock(&max_stack_lock);
173 raw_local_irq_restore(flags); 177 local_irq_restore(flags);
174 178
175 return count; 179 return count;
176} 180}
177 181
178static struct file_operations stack_max_size_fops = { 182static const struct file_operations stack_max_size_fops = {
179 .open = tracing_open_generic, 183 .open = tracing_open_generic,
180 .read = stack_max_size_read, 184 .read = stack_max_size_read,
181 .write = stack_max_size_write, 185 .write = stack_max_size_write,
@@ -273,7 +277,7 @@ static int t_show(struct seq_file *m, void *v)
273 return 0; 277 return 0;
274} 278}
275 279
276static struct seq_operations stack_trace_seq_ops = { 280static const struct seq_operations stack_trace_seq_ops = {
277 .start = t_start, 281 .start = t_start,
278 .next = t_next, 282 .next = t_next,
279 .stop = t_stop, 283 .stop = t_stop,
@@ -289,12 +293,47 @@ static int stack_trace_open(struct inode *inode, struct file *file)
289 return ret; 293 return ret;
290} 294}
291 295
292static struct file_operations stack_trace_fops = { 296static const struct file_operations stack_trace_fops = {
293 .open = stack_trace_open, 297 .open = stack_trace_open,
294 .read = seq_read, 298 .read = seq_read,
295 .llseek = seq_lseek, 299 .llseek = seq_lseek,
296}; 300};
297 301
302int
303stack_trace_sysctl(struct ctl_table *table, int write,
304 struct file *file, void __user *buffer, size_t *lenp,
305 loff_t *ppos)
306{
307 int ret;
308
309 mutex_lock(&stack_sysctl_mutex);
310
311 ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
312
313 if (ret || !write ||
314 (last_stack_tracer_enabled == stack_tracer_enabled))
315 goto out;
316
317 last_stack_tracer_enabled = stack_tracer_enabled;
318
319 if (stack_tracer_enabled)
320 register_ftrace_function(&trace_ops);
321 else
322 unregister_ftrace_function(&trace_ops);
323
324 out:
325 mutex_unlock(&stack_sysctl_mutex);
326 return ret;
327}
328
329static __init int enable_stacktrace(char *str)
330{
331 stack_tracer_enabled = 1;
332 last_stack_tracer_enabled = 1;
333 return 1;
334}
335__setup("stacktrace", enable_stacktrace);
336
298static __init int stack_trace_init(void) 337static __init int stack_trace_init(void)
299{ 338{
300 struct dentry *d_tracer; 339 struct dentry *d_tracer;
@@ -312,7 +351,8 @@ static __init int stack_trace_init(void)
312 if (!entry) 351 if (!entry)
313 pr_warning("Could not create debugfs 'stack_trace' entry\n"); 352 pr_warning("Could not create debugfs 'stack_trace' entry\n");
314 353
315 register_ftrace_function(&trace_ops); 354 if (stack_tracer_enabled)
355 register_ftrace_function(&trace_ops);
316 356
317 return 0; 357 return 0;
318} 358}
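stack_trace_sysctl only registers or unregisters the ftrace callback when the written value actually changes, comparing against last_stack_tracer_enabled under stack_sysctl_mutex. A sketch of that change-detection handler, with the procfs plumbing replaced by a plain setter and stubbed registration calls; names are illustrative:

#include <stdio.h>
#include <pthread.h>

static int stack_tracer_enabled;
static int last_stack_tracer_enabled;
static pthread_mutex_t stack_sysctl_mutex = PTHREAD_MUTEX_INITIALIZER;

static void register_ftrace_function(void)   { printf("callback armed\n"); }
static void unregister_ftrace_function(void) { printf("callback disarmed\n"); }

static void stack_trace_set(int val)
{
        pthread_mutex_lock(&stack_sysctl_mutex);
        stack_tracer_enabled = val;
        if (last_stack_tracer_enabled != stack_tracer_enabled) {
                last_stack_tracer_enabled = stack_tracer_enabled;
                if (stack_tracer_enabled)
                        register_ftrace_function();
                else
                        unregister_ftrace_function();
        }
        pthread_mutex_unlock(&stack_sysctl_mutex);
}

int main(void)
{
        stack_trace_set(1);   /* first enable: arms the callback */
        stack_trace_set(1);   /* no change: nothing happens */
        stack_trace_set(0);   /* disable: disarms the callback */
        return 0;
}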
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index 9587d3bcba55..01becf1f19ff 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -234,20 +234,10 @@ static void stop_stack_timers(void)
234 stop_stack_timer(cpu); 234 stop_stack_timer(cpu);
235} 235}
236 236
237static void stack_reset(struct trace_array *tr)
238{
239 int cpu;
240
241 tr->time_start = ftrace_now(tr->cpu);
242
243 for_each_online_cpu(cpu)
244 tracing_reset(tr, cpu);
245}
246
247static void start_stack_trace(struct trace_array *tr) 237static void start_stack_trace(struct trace_array *tr)
248{ 238{
249 mutex_lock(&sample_timer_lock); 239 mutex_lock(&sample_timer_lock);
250 stack_reset(tr); 240 tracing_reset_online_cpus(tr);
251 start_stack_timers(); 241 start_stack_timers();
252 tracer_enabled = 1; 242 tracer_enabled = 1;
253 mutex_unlock(&sample_timer_lock); 243 mutex_unlock(&sample_timer_lock);
@@ -261,27 +251,17 @@ static void stop_stack_trace(struct trace_array *tr)
261 mutex_unlock(&sample_timer_lock); 251 mutex_unlock(&sample_timer_lock);
262} 252}
263 253
264static void stack_trace_init(struct trace_array *tr) 254static int stack_trace_init(struct trace_array *tr)
265{ 255{
266 sysprof_trace = tr; 256 sysprof_trace = tr;
267 257
268 if (tr->ctrl) 258 start_stack_trace(tr);
269 start_stack_trace(tr); 259 return 0;
270} 260}
271 261
272static void stack_trace_reset(struct trace_array *tr) 262static void stack_trace_reset(struct trace_array *tr)
273{ 263{
274 if (tr->ctrl) 264 stop_stack_trace(tr);
275 stop_stack_trace(tr);
276}
277
278static void stack_trace_ctrl_update(struct trace_array *tr)
279{
280 /* When starting a new trace, reset the buffers */
281 if (tr->ctrl)
282 start_stack_trace(tr);
283 else
284 stop_stack_trace(tr);
285} 265}
286 266
287static struct tracer stack_trace __read_mostly = 267static struct tracer stack_trace __read_mostly =
@@ -289,7 +269,6 @@ static struct tracer stack_trace __read_mostly =
289 .name = "sysprof", 269 .name = "sysprof",
290 .init = stack_trace_init, 270 .init = stack_trace_init,
291 .reset = stack_trace_reset, 271 .reset = stack_trace_reset,
292 .ctrl_update = stack_trace_ctrl_update,
293#ifdef CONFIG_FTRACE_SELFTEST 272#ifdef CONFIG_FTRACE_SELFTEST
294 .selftest = trace_selftest_startup_sysprof, 273 .selftest = trace_selftest_startup_sysprof,
295#endif 274#endif