aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/trace')
-rw-r--r--kernel/trace/Kconfig115
-rw-r--r--kernel/trace/Makefile9
-rw-r--r--kernel/trace/ftrace.c929
-rw-r--r--kernel/trace/ring_buffer.c745
-rw-r--r--kernel/trace/trace.c981
-rw-r--r--kernel/trace/trace.h265
-rw-r--r--kernel/trace/trace_boot.c158
-rw-r--r--kernel/trace/trace_branch.c342
-rw-r--r--kernel/trace/trace_functions.c30
-rw-r--r--kernel/trace/trace_functions_graph.c669
-rw-r--r--kernel/trace/trace_hw_branches.c195
-rw-r--r--kernel/trace/trace_irqsoff.c61
-rw-r--r--kernel/trace/trace_mmiotrace.c49
-rw-r--r--kernel/trace/trace_nop.c65
-rw-r--r--kernel/trace/trace_power.c179
-rw-r--r--kernel/trace/trace_sched_switch.c121
-rw-r--r--kernel/trace/trace_sched_wakeup.c72
-rw-r--r--kernel/trace/trace_selftest.c173
-rw-r--r--kernel/trace/trace_stack.c94
-rw-r--r--kernel/trace/trace_sysprof.c32
20 files changed, 4459 insertions, 825 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 33dbefd471e8..e2a4ff6fc3a6 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -3,18 +3,34 @@
3# select HAVE_FUNCTION_TRACER: 3# select HAVE_FUNCTION_TRACER:
4# 4#
5 5
6config USER_STACKTRACE_SUPPORT
7 bool
8
6config NOP_TRACER 9config NOP_TRACER
7 bool 10 bool
8 11
9config HAVE_FUNCTION_TRACER 12config HAVE_FUNCTION_TRACER
10 bool 13 bool
11 14
15config HAVE_FUNCTION_GRAPH_TRACER
16 bool
17
18config HAVE_FUNCTION_TRACE_MCOUNT_TEST
19 bool
20 help
21 This gets selected when the arch tests the function_trace_stop
22 variable at the mcount call site. Otherwise, this variable
23 is tested by the called function.
24
12config HAVE_DYNAMIC_FTRACE 25config HAVE_DYNAMIC_FTRACE
13 bool 26 bool
14 27
15config HAVE_FTRACE_MCOUNT_RECORD 28config HAVE_FTRACE_MCOUNT_RECORD
16 bool 29 bool
17 30
31config HAVE_HW_BRANCH_TRACER
32 bool
33
18config TRACER_MAX_TRACE 34config TRACER_MAX_TRACE
19 bool 35 bool
20 36
@@ -47,6 +63,20 @@ config FUNCTION_TRACER
47 (the bootup default), then the overhead of the instructions is very 63 (the bootup default), then the overhead of the instructions is very
48 small and not measurable even in micro-benchmarks. 64 small and not measurable even in micro-benchmarks.
49 65
66config FUNCTION_GRAPH_TRACER
67 bool "Kernel Function Graph Tracer"
68 depends on HAVE_FUNCTION_GRAPH_TRACER
69 depends on FUNCTION_TRACER
70 default y
71 help
72 Enable the kernel to trace a function at both its return
73 and its entry.
 74	  Its first purpose is to trace the duration of functions and
 75	  draw a call graph for each thread with some information like
76 the return value.
77 This is done by setting the current return address on the current
78 task structure into a stack of calls.
79
50config IRQSOFF_TRACER 80config IRQSOFF_TRACER
51 bool "Interrupts-off Latency Tracer" 81 bool "Interrupts-off Latency Tracer"
52 default n 82 default n
@@ -138,6 +168,70 @@ config BOOT_TRACER
138 selected, because the self-tests are an initcall as well and that 168 selected, because the self-tests are an initcall as well and that
139 would invalidate the boot trace. ) 169 would invalidate the boot trace. )
140 170
171config TRACE_BRANCH_PROFILING
172 bool "Trace likely/unlikely profiler"
173 depends on DEBUG_KERNEL
174 select TRACING
175 help
176	  This tracer profiles all the likely and unlikely macros
177 in the kernel. It will display the results in:
178
179 /debugfs/tracing/profile_annotated_branch
180
181 Note: this will add a significant overhead, only turn this
182 on if you need to profile the system's use of these macros.
183
184 Say N if unsure.
185
186config PROFILE_ALL_BRANCHES
187 bool "Profile all if conditionals"
188 depends on TRACE_BRANCH_PROFILING
189 help
190 This tracer profiles all branch conditions. Every if ()
191	  taken in the kernel is recorded whether it hit or missed.
192 The results will be displayed in:
193
194 /debugfs/tracing/profile_branch
195
196 This configuration, when enabled, will impose a great overhead
197 on the system. This should only be enabled when the system
198	  is to be analyzed.
199
200 Say N if unsure.
201
202config TRACING_BRANCHES
203 bool
204 help
205 Selected by tracers that will trace the likely and unlikely
206 conditions. This prevents the tracers themselves from being
207 profiled. Profiling the tracing infrastructure can only happen
208 when the likelys and unlikelys are not being traced.
209
210config BRANCH_TRACER
211 bool "Trace likely/unlikely instances"
212 depends on TRACE_BRANCH_PROFILING
213 select TRACING_BRANCHES
214 help
215 This traces the events of likely and unlikely condition
216 calls in the kernel. The difference between this and the
217 "Trace likely/unlikely profiler" is that this is not a
218 histogram of the callers, but actually places the calling
219 events into a running trace buffer to see when and where the
220 events happened, as well as their results.
221
222 Say N if unsure.
223
224config POWER_TRACER
225 bool "Trace power consumption behavior"
226 depends on DEBUG_KERNEL
227 depends on X86
228 select TRACING
229 help
230 This tracer helps developers to analyze and optimize the kernels
231 power management decisions, specifically the C-state and P-state
232 behavior.
233
234
141config STACK_TRACER 235config STACK_TRACER
142 bool "Trace max stack" 236 bool "Trace max stack"
143 depends on HAVE_FUNCTION_TRACER 237 depends on HAVE_FUNCTION_TRACER
@@ -150,13 +244,26 @@ config STACK_TRACER
150 244
151 This tracer works by hooking into every function call that the 245 This tracer works by hooking into every function call that the
152 kernel executes, and keeping a maximum stack depth value and 246 kernel executes, and keeping a maximum stack depth value and
153 stack-trace saved. Because this logic has to execute in every 247 stack-trace saved. If this is configured with DYNAMIC_FTRACE
154 kernel function, all the time, this option can slow down the 248 then it will not have any overhead while the stack tracer
155 kernel measurably and is generally intended for kernel 249 is disabled.
156 developers only. 250
251 To enable the stack tracer on bootup, pass in 'stacktrace'
252 on the kernel command line.
253
254 The stack tracer can also be enabled or disabled via the
255 sysctl kernel.stack_tracer_enabled
157 256
158 Say N if unsure. 257 Say N if unsure.
159 258
259config HW_BRANCH_TRACER
260 depends on HAVE_HW_BRANCH_TRACER
261 bool "Trace hw branches"
262 select TRACING
263 help
264 This tracer records all branches on the system in a circular
265 buffer giving access to the last N branches for each cpu.
266
160config DYNAMIC_FTRACE 267config DYNAMIC_FTRACE
161 bool "enable/disable ftrace tracepoints dynamically" 268 bool "enable/disable ftrace tracepoints dynamically"
162 depends on FUNCTION_TRACER 269 depends on FUNCTION_TRACER
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index c8228b1a49e9..349d5a93653f 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -10,6 +10,11 @@ CFLAGS_trace_selftest_dynamic.o = -pg
10obj-y += trace_selftest_dynamic.o 10obj-y += trace_selftest_dynamic.o
11endif 11endif
12 12
13# If unlikely tracing is enabled, do not trace these files
14ifdef CONFIG_TRACING_BRANCHES
15KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
16endif
17
13obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o 18obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o
14obj-$(CONFIG_RING_BUFFER) += ring_buffer.o 19obj-$(CONFIG_RING_BUFFER) += ring_buffer.o
15 20
@@ -24,5 +29,9 @@ obj-$(CONFIG_NOP_TRACER) += trace_nop.o
24obj-$(CONFIG_STACK_TRACER) += trace_stack.o 29obj-$(CONFIG_STACK_TRACER) += trace_stack.o
25obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o 30obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
26obj-$(CONFIG_BOOT_TRACER) += trace_boot.o 31obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
32obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
33obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
34obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o
35obj-$(CONFIG_POWER_TRACER) += trace_power.o
27 36
28libftrace-y := ftrace.o 37libftrace-y := ftrace.o
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 78db083390f0..2f32969c09df 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -47,6 +47,13 @@
47int ftrace_enabled __read_mostly; 47int ftrace_enabled __read_mostly;
48static int last_ftrace_enabled; 48static int last_ftrace_enabled;
49 49
50/* set when tracing only a pid */
51struct pid *ftrace_pid_trace;
52static struct pid * const ftrace_swapper_pid = &init_struct_pid;
53
54/* Quick disabling of function tracer. */
55int function_trace_stop;
56
50/* 57/*
51 * ftrace_disabled is set when an anomaly is discovered. 58 * ftrace_disabled is set when an anomaly is discovered.
52 * ftrace_disabled is much stronger than ftrace_enabled. 59 * ftrace_disabled is much stronger than ftrace_enabled.
@@ -55,6 +62,7 @@ static int ftrace_disabled __read_mostly;
55 62
56static DEFINE_SPINLOCK(ftrace_lock); 63static DEFINE_SPINLOCK(ftrace_lock);
57static DEFINE_MUTEX(ftrace_sysctl_lock); 64static DEFINE_MUTEX(ftrace_sysctl_lock);
65static DEFINE_MUTEX(ftrace_start_lock);
58 66
59static struct ftrace_ops ftrace_list_end __read_mostly = 67static struct ftrace_ops ftrace_list_end __read_mostly =
60{ 68{
@@ -63,6 +71,8 @@ static struct ftrace_ops ftrace_list_end __read_mostly =
63 71
64static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end; 72static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
65ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; 73ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
74ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
75ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
66 76
67static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) 77static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
68{ 78{
@@ -79,6 +89,21 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
79 }; 89 };
80} 90}
81 91
92static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
93{
94 if (!test_tsk_trace_trace(current))
95 return;
96
97 ftrace_pid_function(ip, parent_ip);
98}
99
100static void set_ftrace_pid_function(ftrace_func_t func)
101{
102 /* do not set ftrace_pid_function to itself! */
103 if (func != ftrace_pid_func)
104 ftrace_pid_function = func;
105}
106
82/** 107/**
83 * clear_ftrace_function - reset the ftrace function 108 * clear_ftrace_function - reset the ftrace function
84 * 109 *
@@ -88,7 +113,23 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
88void clear_ftrace_function(void) 113void clear_ftrace_function(void)
89{ 114{
90 ftrace_trace_function = ftrace_stub; 115 ftrace_trace_function = ftrace_stub;
116 __ftrace_trace_function = ftrace_stub;
117 ftrace_pid_function = ftrace_stub;
118}
119
120#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
121/*
122 * For those archs that do not test ftrace_trace_stop in their
123 * mcount call site, we need to do it from C.
124 */
125static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
126{
127 if (function_trace_stop)
128 return;
129
130 __ftrace_trace_function(ip, parent_ip);
91} 131}
132#endif
92 133
93static int __register_ftrace_function(struct ftrace_ops *ops) 134static int __register_ftrace_function(struct ftrace_ops *ops)
94{ 135{
@@ -106,14 +147,28 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
106 ftrace_list = ops; 147 ftrace_list = ops;
107 148
108 if (ftrace_enabled) { 149 if (ftrace_enabled) {
150 ftrace_func_t func;
151
152 if (ops->next == &ftrace_list_end)
153 func = ops->func;
154 else
155 func = ftrace_list_func;
156
157 if (ftrace_pid_trace) {
158 set_ftrace_pid_function(func);
159 func = ftrace_pid_func;
160 }
161
109 /* 162 /*
110 * For one func, simply call it directly. 163 * For one func, simply call it directly.
111 * For more than one func, call the chain. 164 * For more than one func, call the chain.
112 */ 165 */
113 if (ops->next == &ftrace_list_end) 166#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
114 ftrace_trace_function = ops->func; 167 ftrace_trace_function = func;
115 else 168#else
116 ftrace_trace_function = ftrace_list_func; 169 __ftrace_trace_function = func;
170 ftrace_trace_function = ftrace_test_stop_func;
171#endif
117 } 172 }
118 173
119 spin_unlock(&ftrace_lock); 174 spin_unlock(&ftrace_lock);
@@ -152,9 +207,19 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
152 207
153 if (ftrace_enabled) { 208 if (ftrace_enabled) {
154 /* If we only have one func left, then call that directly */ 209 /* If we only have one func left, then call that directly */
155 if (ftrace_list == &ftrace_list_end || 210 if (ftrace_list->next == &ftrace_list_end) {
156 ftrace_list->next == &ftrace_list_end) 211 ftrace_func_t func = ftrace_list->func;
157 ftrace_trace_function = ftrace_list->func; 212
213 if (ftrace_pid_trace) {
214 set_ftrace_pid_function(func);
215 func = ftrace_pid_func;
216 }
217#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
218 ftrace_trace_function = func;
219#else
220 __ftrace_trace_function = func;
221#endif
222 }
158 } 223 }
159 224
160 out: 225 out:
@@ -163,6 +228,36 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
163 return ret; 228 return ret;
164} 229}
165 230
231static void ftrace_update_pid_func(void)
232{
233 ftrace_func_t func;
234
235 /* should not be called from interrupt context */
236 spin_lock(&ftrace_lock);
237
238 if (ftrace_trace_function == ftrace_stub)
239 goto out;
240
241 func = ftrace_trace_function;
242
243 if (ftrace_pid_trace) {
244 set_ftrace_pid_function(func);
245 func = ftrace_pid_func;
246 } else {
247 if (func == ftrace_pid_func)
248 func = ftrace_pid_function;
249 }
250
251#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
252 ftrace_trace_function = func;
253#else
254 __ftrace_trace_function = func;
255#endif
256
257 out:
258 spin_unlock(&ftrace_lock);
259}
260
166#ifdef CONFIG_DYNAMIC_FTRACE 261#ifdef CONFIG_DYNAMIC_FTRACE
167#ifndef CONFIG_FTRACE_MCOUNT_RECORD 262#ifndef CONFIG_FTRACE_MCOUNT_RECORD
168# error Dynamic ftrace depends on MCOUNT_RECORD 263# error Dynamic ftrace depends on MCOUNT_RECORD
@@ -182,6 +277,8 @@ enum {
182 FTRACE_UPDATE_TRACE_FUNC = (1 << 2), 277 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
183 FTRACE_ENABLE_MCOUNT = (1 << 3), 278 FTRACE_ENABLE_MCOUNT = (1 << 3),
184 FTRACE_DISABLE_MCOUNT = (1 << 4), 279 FTRACE_DISABLE_MCOUNT = (1 << 4),
280 FTRACE_START_FUNC_RET = (1 << 5),
281 FTRACE_STOP_FUNC_RET = (1 << 6),
185}; 282};
186 283
187static int ftrace_filtered; 284static int ftrace_filtered;
@@ -308,7 +405,7 @@ ftrace_record_ip(unsigned long ip)
308{ 405{
309 struct dyn_ftrace *rec; 406 struct dyn_ftrace *rec;
310 407
311 if (!ftrace_enabled || ftrace_disabled) 408 if (ftrace_disabled)
312 return NULL; 409 return NULL;
313 410
314 rec = ftrace_alloc_dyn_node(ip); 411 rec = ftrace_alloc_dyn_node(ip);
@@ -322,14 +419,51 @@ ftrace_record_ip(unsigned long ip)
322 return rec; 419 return rec;
323} 420}
324 421
325#define FTRACE_ADDR ((long)(ftrace_caller)) 422static void print_ip_ins(const char *fmt, unsigned char *p)
423{
424 int i;
425
426 printk(KERN_CONT "%s", fmt);
427
428 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
429 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
430}
431
432static void ftrace_bug(int failed, unsigned long ip)
433{
434 switch (failed) {
435 case -EFAULT:
436 FTRACE_WARN_ON_ONCE(1);
437 pr_info("ftrace faulted on modifying ");
438 print_ip_sym(ip);
439 break;
440 case -EINVAL:
441 FTRACE_WARN_ON_ONCE(1);
442 pr_info("ftrace failed to modify ");
443 print_ip_sym(ip);
444 print_ip_ins(" actual: ", (unsigned char *)ip);
445 printk(KERN_CONT "\n");
446 break;
447 case -EPERM:
448 FTRACE_WARN_ON_ONCE(1);
449 pr_info("ftrace faulted on writing ");
450 print_ip_sym(ip);
451 break;
452 default:
453 FTRACE_WARN_ON_ONCE(1);
454 pr_info("ftrace faulted on unknown error ");
455 print_ip_sym(ip);
456 }
457}
458
326 459
327static int 460static int
328__ftrace_replace_code(struct dyn_ftrace *rec, 461__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
329 unsigned char *nop, int enable)
330{ 462{
331 unsigned long ip, fl; 463 unsigned long ip, fl;
332 unsigned char *call, *old, *new; 464 unsigned long ftrace_addr;
465
466 ftrace_addr = (unsigned long)ftrace_caller;
333 467
334 ip = rec->ip; 468 ip = rec->ip;
335 469
@@ -388,34 +522,28 @@ __ftrace_replace_code(struct dyn_ftrace *rec,
388 } 522 }
389 } 523 }
390 524
391 call = ftrace_call_replace(ip, FTRACE_ADDR); 525 if (rec->flags & FTRACE_FL_ENABLED)
392 526 return ftrace_make_call(rec, ftrace_addr);
393 if (rec->flags & FTRACE_FL_ENABLED) { 527 else
394 old = nop; 528 return ftrace_make_nop(NULL, rec, ftrace_addr);
395 new = call;
396 } else {
397 old = call;
398 new = nop;
399 }
400
401 return ftrace_modify_code(ip, old, new);
402} 529}
403 530
404static void ftrace_replace_code(int enable) 531static void ftrace_replace_code(int enable)
405{ 532{
406 int i, failed; 533 int i, failed;
407 unsigned char *nop = NULL;
408 struct dyn_ftrace *rec; 534 struct dyn_ftrace *rec;
409 struct ftrace_page *pg; 535 struct ftrace_page *pg;
410 536
411 nop = ftrace_nop_replace();
412
413 for (pg = ftrace_pages_start; pg; pg = pg->next) { 537 for (pg = ftrace_pages_start; pg; pg = pg->next) {
414 for (i = 0; i < pg->index; i++) { 538 for (i = 0; i < pg->index; i++) {
415 rec = &pg->records[i]; 539 rec = &pg->records[i];
416 540
417 /* don't modify code that has already faulted */ 541 /*
418 if (rec->flags & FTRACE_FL_FAILED) 542 * Skip over free records and records that have
543 * failed.
544 */
545 if (rec->flags & FTRACE_FL_FREE ||
546 rec->flags & FTRACE_FL_FAILED)
419 continue; 547 continue;
420 548
421 /* ignore updates to this record's mcount site */ 549 /* ignore updates to this record's mcount site */
@@ -426,68 +554,30 @@ static void ftrace_replace_code(int enable)
426 unfreeze_record(rec); 554 unfreeze_record(rec);
427 } 555 }
428 556
429 failed = __ftrace_replace_code(rec, nop, enable); 557 failed = __ftrace_replace_code(rec, enable);
430 if (failed && (rec->flags & FTRACE_FL_CONVERTED)) { 558 if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
431 rec->flags |= FTRACE_FL_FAILED; 559 rec->flags |= FTRACE_FL_FAILED;
432 if ((system_state == SYSTEM_BOOTING) || 560 if ((system_state == SYSTEM_BOOTING) ||
433 !core_kernel_text(rec->ip)) { 561 !core_kernel_text(rec->ip)) {
434 ftrace_free_rec(rec); 562 ftrace_free_rec(rec);
435 } 563 } else
564 ftrace_bug(failed, rec->ip);
436 } 565 }
437 } 566 }
438 } 567 }
439} 568}
440 569
441static void print_ip_ins(const char *fmt, unsigned char *p)
442{
443 int i;
444
445 printk(KERN_CONT "%s", fmt);
446
447 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
448 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
449}
450
451static int 570static int
452ftrace_code_disable(struct dyn_ftrace *rec) 571ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
453{ 572{
454 unsigned long ip; 573 unsigned long ip;
455 unsigned char *nop, *call;
456 int ret; 574 int ret;
457 575
458 ip = rec->ip; 576 ip = rec->ip;
459 577
460 nop = ftrace_nop_replace(); 578 ret = ftrace_make_nop(mod, rec, mcount_addr);
461 call = ftrace_call_replace(ip, mcount_addr);
462
463 ret = ftrace_modify_code(ip, call, nop);
464 if (ret) { 579 if (ret) {
465 switch (ret) { 580 ftrace_bug(ret, ip);
466 case -EFAULT:
467 FTRACE_WARN_ON_ONCE(1);
468 pr_info("ftrace faulted on modifying ");
469 print_ip_sym(ip);
470 break;
471 case -EINVAL:
472 FTRACE_WARN_ON_ONCE(1);
473 pr_info("ftrace failed to modify ");
474 print_ip_sym(ip);
475 print_ip_ins(" expected: ", call);
476 print_ip_ins(" actual: ", (unsigned char *)ip);
477 print_ip_ins(" replace: ", nop);
478 printk(KERN_CONT "\n");
479 break;
480 case -EPERM:
481 FTRACE_WARN_ON_ONCE(1);
482 pr_info("ftrace faulted on writing ");
483 print_ip_sym(ip);
484 break;
485 default:
486 FTRACE_WARN_ON_ONCE(1);
487 pr_info("ftrace faulted on unknown error ");
488 print_ip_sym(ip);
489 }
490
491 rec->flags |= FTRACE_FL_FAILED; 581 rec->flags |= FTRACE_FL_FAILED;
492 return 0; 582 return 0;
493 } 583 }
@@ -506,6 +596,11 @@ static int __ftrace_modify_code(void *data)
506 if (*command & FTRACE_UPDATE_TRACE_FUNC) 596 if (*command & FTRACE_UPDATE_TRACE_FUNC)
507 ftrace_update_ftrace_func(ftrace_trace_function); 597 ftrace_update_ftrace_func(ftrace_trace_function);
508 598
599 if (*command & FTRACE_START_FUNC_RET)
600 ftrace_enable_ftrace_graph_caller();
601 else if (*command & FTRACE_STOP_FUNC_RET)
602 ftrace_disable_ftrace_graph_caller();
603
509 return 0; 604 return 0;
510} 605}
511 606
@@ -515,43 +610,43 @@ static void ftrace_run_update_code(int command)
515} 610}
516 611
517static ftrace_func_t saved_ftrace_func; 612static ftrace_func_t saved_ftrace_func;
518static int ftrace_start; 613static int ftrace_start_up;
519static DEFINE_MUTEX(ftrace_start_lock);
520 614
521static void ftrace_startup(void) 615static void ftrace_startup_enable(int command)
522{ 616{
523 int command = 0;
524
525 if (unlikely(ftrace_disabled))
526 return;
527
528 mutex_lock(&ftrace_start_lock);
529 ftrace_start++;
530 command |= FTRACE_ENABLE_CALLS;
531
532 if (saved_ftrace_func != ftrace_trace_function) { 617 if (saved_ftrace_func != ftrace_trace_function) {
533 saved_ftrace_func = ftrace_trace_function; 618 saved_ftrace_func = ftrace_trace_function;
534 command |= FTRACE_UPDATE_TRACE_FUNC; 619 command |= FTRACE_UPDATE_TRACE_FUNC;
535 } 620 }
536 621
537 if (!command || !ftrace_enabled) 622 if (!command || !ftrace_enabled)
538 goto out; 623 return;
539 624
540 ftrace_run_update_code(command); 625 ftrace_run_update_code(command);
541 out:
542 mutex_unlock(&ftrace_start_lock);
543} 626}
544 627
545static void ftrace_shutdown(void) 628static void ftrace_startup(int command)
546{ 629{
547 int command = 0; 630 if (unlikely(ftrace_disabled))
631 return;
632
633 mutex_lock(&ftrace_start_lock);
634 ftrace_start_up++;
635 command |= FTRACE_ENABLE_CALLS;
548 636
637 ftrace_startup_enable(command);
638
639 mutex_unlock(&ftrace_start_lock);
640}
641
642static void ftrace_shutdown(int command)
643{
549 if (unlikely(ftrace_disabled)) 644 if (unlikely(ftrace_disabled))
550 return; 645 return;
551 646
552 mutex_lock(&ftrace_start_lock); 647 mutex_lock(&ftrace_start_lock);
553 ftrace_start--; 648 ftrace_start_up--;
554 if (!ftrace_start) 649 if (!ftrace_start_up)
555 command |= FTRACE_DISABLE_CALLS; 650 command |= FTRACE_DISABLE_CALLS;
556 651
557 if (saved_ftrace_func != ftrace_trace_function) { 652 if (saved_ftrace_func != ftrace_trace_function) {
@@ -577,8 +672,8 @@ static void ftrace_startup_sysctl(void)
577 mutex_lock(&ftrace_start_lock); 672 mutex_lock(&ftrace_start_lock);
578 /* Force update next time */ 673 /* Force update next time */
579 saved_ftrace_func = NULL; 674 saved_ftrace_func = NULL;
580 /* ftrace_start is true if we want ftrace running */ 675 /* ftrace_start_up is true if we want ftrace running */
581 if (ftrace_start) 676 if (ftrace_start_up)
582 command |= FTRACE_ENABLE_CALLS; 677 command |= FTRACE_ENABLE_CALLS;
583 678
584 ftrace_run_update_code(command); 679 ftrace_run_update_code(command);
@@ -593,8 +688,8 @@ static void ftrace_shutdown_sysctl(void)
593 return; 688 return;
594 689
595 mutex_lock(&ftrace_start_lock); 690 mutex_lock(&ftrace_start_lock);
596 /* ftrace_start is true if ftrace is running */ 691 /* ftrace_start_up is true if ftrace is running */
597 if (ftrace_start) 692 if (ftrace_start_up)
598 command |= FTRACE_DISABLE_CALLS; 693 command |= FTRACE_DISABLE_CALLS;
599 694
600 ftrace_run_update_code(command); 695 ftrace_run_update_code(command);
@@ -605,7 +700,7 @@ static cycle_t ftrace_update_time;
605static unsigned long ftrace_update_cnt; 700static unsigned long ftrace_update_cnt;
606unsigned long ftrace_update_tot_cnt; 701unsigned long ftrace_update_tot_cnt;
607 702
608static int ftrace_update_code(void) 703static int ftrace_update_code(struct module *mod)
609{ 704{
610 struct dyn_ftrace *p, *t; 705 struct dyn_ftrace *p, *t;
611 cycle_t start, stop; 706 cycle_t start, stop;
@@ -622,7 +717,7 @@ static int ftrace_update_code(void)
622 list_del_init(&p->list); 717 list_del_init(&p->list);
623 718
624 /* convert record (i.e, patch mcount-call with NOP) */ 719 /* convert record (i.e, patch mcount-call with NOP) */
625 if (ftrace_code_disable(p)) { 720 if (ftrace_code_disable(mod, p)) {
626 p->flags |= FTRACE_FL_CONVERTED; 721 p->flags |= FTRACE_FL_CONVERTED;
627 ftrace_update_cnt++; 722 ftrace_update_cnt++;
628 } else 723 } else
@@ -690,7 +785,6 @@ enum {
690#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ 785#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
691 786
692struct ftrace_iterator { 787struct ftrace_iterator {
693 loff_t pos;
694 struct ftrace_page *pg; 788 struct ftrace_page *pg;
695 unsigned idx; 789 unsigned idx;
696 unsigned flags; 790 unsigned flags;
@@ -715,6 +809,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
715 iter->pg = iter->pg->next; 809 iter->pg = iter->pg->next;
716 iter->idx = 0; 810 iter->idx = 0;
717 goto retry; 811 goto retry;
812 } else {
813 iter->idx = -1;
718 } 814 }
719 } else { 815 } else {
720 rec = &iter->pg->records[iter->idx++]; 816 rec = &iter->pg->records[iter->idx++];
@@ -737,8 +833,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
737 } 833 }
738 spin_unlock(&ftrace_lock); 834 spin_unlock(&ftrace_lock);
739 835
740 iter->pos = *pos;
741
742 return rec; 836 return rec;
743} 837}
744 838
@@ -746,13 +840,15 @@ static void *t_start(struct seq_file *m, loff_t *pos)
746{ 840{
747 struct ftrace_iterator *iter = m->private; 841 struct ftrace_iterator *iter = m->private;
748 void *p = NULL; 842 void *p = NULL;
749 loff_t l = -1;
750 843
751 if (*pos > iter->pos) 844 if (*pos > 0) {
752 *pos = iter->pos; 845 if (iter->idx < 0)
846 return p;
847 (*pos)--;
848 iter->idx--;
849 }
753 850
754 l = *pos; 851 p = t_next(m, p, pos);
755 p = t_next(m, p, &l);
756 852
757 return p; 853 return p;
758} 854}
@@ -763,21 +859,15 @@ static void t_stop(struct seq_file *m, void *p)
763 859
764static int t_show(struct seq_file *m, void *v) 860static int t_show(struct seq_file *m, void *v)
765{ 861{
766 struct ftrace_iterator *iter = m->private;
767 struct dyn_ftrace *rec = v; 862 struct dyn_ftrace *rec = v;
768 char str[KSYM_SYMBOL_LEN]; 863 char str[KSYM_SYMBOL_LEN];
769 int ret = 0;
770 864
771 if (!rec) 865 if (!rec)
772 return 0; 866 return 0;
773 867
774 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); 868 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
775 869
776 ret = seq_printf(m, "%s\n", str); 870 seq_printf(m, "%s\n", str);
777 if (ret < 0) {
778 iter->pos--;
779 iter->idx--;
780 }
781 871
782 return 0; 872 return 0;
783} 873}
@@ -803,7 +893,6 @@ ftrace_avail_open(struct inode *inode, struct file *file)
803 return -ENOMEM; 893 return -ENOMEM;
804 894
805 iter->pg = ftrace_pages_start; 895 iter->pg = ftrace_pages_start;
806 iter->pos = 0;
807 896
808 ret = seq_open(file, &show_ftrace_seq_ops); 897 ret = seq_open(file, &show_ftrace_seq_ops);
809 if (!ret) { 898 if (!ret) {
@@ -890,7 +979,6 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
890 979
891 if (file->f_mode & FMODE_READ) { 980 if (file->f_mode & FMODE_READ) {
892 iter->pg = ftrace_pages_start; 981 iter->pg = ftrace_pages_start;
893 iter->pos = 0;
894 iter->flags = enable ? FTRACE_ITER_FILTER : 982 iter->flags = enable ? FTRACE_ITER_FILTER :
895 FTRACE_ITER_NOTRACE; 983 FTRACE_ITER_NOTRACE;
896 984
@@ -959,6 +1047,13 @@ ftrace_match(unsigned char *buff, int len, int enable)
959 int type = MATCH_FULL; 1047 int type = MATCH_FULL;
960 unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; 1048 unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
961 unsigned i, match = 0, search_len = 0; 1049 unsigned i, match = 0, search_len = 0;
1050 int not = 0;
1051
1052 if (buff[0] == '!') {
1053 not = 1;
1054 buff++;
1055 len--;
1056 }
962 1057
963 for (i = 0; i < len; i++) { 1058 for (i = 0; i < len; i++) {
964 if (buff[i] == '*') { 1059 if (buff[i] == '*') {
@@ -1012,8 +1107,12 @@ ftrace_match(unsigned char *buff, int len, int enable)
1012 matched = 1; 1107 matched = 1;
1013 break; 1108 break;
1014 } 1109 }
1015 if (matched) 1110 if (matched) {
1016 rec->flags |= flag; 1111 if (not)
1112 rec->flags &= ~flag;
1113 else
1114 rec->flags |= flag;
1115 }
1017 } 1116 }
1018 pg = pg->next; 1117 pg = pg->next;
1019 } 1118 }
@@ -1181,7 +1280,7 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
1181 1280
1182 mutex_lock(&ftrace_sysctl_lock); 1281 mutex_lock(&ftrace_sysctl_lock);
1183 mutex_lock(&ftrace_start_lock); 1282 mutex_lock(&ftrace_start_lock);
1184 if (ftrace_start && ftrace_enabled) 1283 if (ftrace_start_up && ftrace_enabled)
1185 ftrace_run_update_code(FTRACE_ENABLE_CALLS); 1284 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1186 mutex_unlock(&ftrace_start_lock); 1285 mutex_unlock(&ftrace_start_lock);
1187 mutex_unlock(&ftrace_sysctl_lock); 1286 mutex_unlock(&ftrace_sysctl_lock);
@@ -1233,12 +1332,233 @@ static struct file_operations ftrace_notrace_fops = {
1233 .release = ftrace_notrace_release, 1332 .release = ftrace_notrace_release,
1234}; 1333};
1235 1334
1236static __init int ftrace_init_debugfs(void) 1335#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1336
1337static DEFINE_MUTEX(graph_lock);
1338
1339int ftrace_graph_count;
1340unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
1341
1342static void *
1343g_next(struct seq_file *m, void *v, loff_t *pos)
1237{ 1344{
1238 struct dentry *d_tracer; 1345 unsigned long *array = m->private;
1239 struct dentry *entry; 1346 int index = *pos;
1240 1347
1241 d_tracer = tracing_init_dentry(); 1348 (*pos)++;
1349
1350 if (index >= ftrace_graph_count)
1351 return NULL;
1352
1353 return &array[index];
1354}
1355
1356static void *g_start(struct seq_file *m, loff_t *pos)
1357{
1358 void *p = NULL;
1359
1360 mutex_lock(&graph_lock);
1361
1362 p = g_next(m, p, pos);
1363
1364 return p;
1365}
1366
1367static void g_stop(struct seq_file *m, void *p)
1368{
1369 mutex_unlock(&graph_lock);
1370}
1371
1372static int g_show(struct seq_file *m, void *v)
1373{
1374 unsigned long *ptr = v;
1375 char str[KSYM_SYMBOL_LEN];
1376
1377 if (!ptr)
1378 return 0;
1379
1380 kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
1381
1382 seq_printf(m, "%s\n", str);
1383
1384 return 0;
1385}
1386
1387static struct seq_operations ftrace_graph_seq_ops = {
1388 .start = g_start,
1389 .next = g_next,
1390 .stop = g_stop,
1391 .show = g_show,
1392};
1393
1394static int
1395ftrace_graph_open(struct inode *inode, struct file *file)
1396{
1397 int ret = 0;
1398
1399 if (unlikely(ftrace_disabled))
1400 return -ENODEV;
1401
1402 mutex_lock(&graph_lock);
1403 if ((file->f_mode & FMODE_WRITE) &&
1404 !(file->f_flags & O_APPEND)) {
1405 ftrace_graph_count = 0;
1406 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
1407 }
1408
1409 if (file->f_mode & FMODE_READ) {
1410 ret = seq_open(file, &ftrace_graph_seq_ops);
1411 if (!ret) {
1412 struct seq_file *m = file->private_data;
1413 m->private = ftrace_graph_funcs;
1414 }
1415 } else
1416 file->private_data = ftrace_graph_funcs;
1417 mutex_unlock(&graph_lock);
1418
1419 return ret;
1420}
1421
1422static ssize_t
1423ftrace_graph_read(struct file *file, char __user *ubuf,
1424 size_t cnt, loff_t *ppos)
1425{
1426 if (file->f_mode & FMODE_READ)
1427 return seq_read(file, ubuf, cnt, ppos);
1428 else
1429 return -EPERM;
1430}
1431
1432static int
1433ftrace_set_func(unsigned long *array, int idx, char *buffer)
1434{
1435 char str[KSYM_SYMBOL_LEN];
1436 struct dyn_ftrace *rec;
1437 struct ftrace_page *pg;
1438 int found = 0;
1439 int i, j;
1440
1441 if (ftrace_disabled)
1442 return -ENODEV;
1443
1444 /* should not be called from interrupt context */
1445 spin_lock(&ftrace_lock);
1446
1447 for (pg = ftrace_pages_start; pg; pg = pg->next) {
1448 for (i = 0; i < pg->index; i++) {
1449 rec = &pg->records[i];
1450
1451 if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
1452 continue;
1453
1454 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1455 if (strcmp(str, buffer) == 0) {
1456 found = 1;
1457 for (j = 0; j < idx; j++)
1458 if (array[j] == rec->ip) {
1459 found = 0;
1460 break;
1461 }
1462 if (found)
1463 array[idx] = rec->ip;
1464 break;
1465 }
1466 }
1467 }
1468 spin_unlock(&ftrace_lock);
1469
1470 return found ? 0 : -EINVAL;
1471}
1472
1473static ssize_t
1474ftrace_graph_write(struct file *file, const char __user *ubuf,
1475 size_t cnt, loff_t *ppos)
1476{
1477 unsigned char buffer[FTRACE_BUFF_MAX+1];
1478 unsigned long *array;
1479 size_t read = 0;
1480 ssize_t ret;
1481 int index = 0;
1482 char ch;
1483
1484 if (!cnt || cnt < 0)
1485 return 0;
1486
1487 mutex_lock(&graph_lock);
1488
1489 if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
1490 ret = -EBUSY;
1491 goto out;
1492 }
1493
1494 if (file->f_mode & FMODE_READ) {
1495 struct seq_file *m = file->private_data;
1496 array = m->private;
1497 } else
1498 array = file->private_data;
1499
1500 ret = get_user(ch, ubuf++);
1501 if (ret)
1502 goto out;
1503 read++;
1504 cnt--;
1505
1506 /* skip white space */
1507 while (cnt && isspace(ch)) {
1508 ret = get_user(ch, ubuf++);
1509 if (ret)
1510 goto out;
1511 read++;
1512 cnt--;
1513 }
1514
1515 if (isspace(ch)) {
1516 *ppos += read;
1517 ret = read;
1518 goto out;
1519 }
1520
1521 while (cnt && !isspace(ch)) {
1522 if (index < FTRACE_BUFF_MAX)
1523 buffer[index++] = ch;
1524 else {
1525 ret = -EINVAL;
1526 goto out;
1527 }
1528 ret = get_user(ch, ubuf++);
1529 if (ret)
1530 goto out;
1531 read++;
1532 cnt--;
1533 }
1534 buffer[index] = 0;
1535
1536 /* we allow only one at a time */
1537 ret = ftrace_set_func(array, ftrace_graph_count, buffer);
1538 if (ret)
1539 goto out;
1540
1541 ftrace_graph_count++;
1542
1543 file->f_pos += read;
1544
1545 ret = read;
1546 out:
1547 mutex_unlock(&graph_lock);
1548
1549 return ret;
1550}
1551
1552static const struct file_operations ftrace_graph_fops = {
1553 .open = ftrace_graph_open,
1554 .read = ftrace_graph_read,
1555 .write = ftrace_graph_write,
1556};
1557#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1558
1559static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
1560{
1561 struct dentry *entry;
1242 1562
1243 entry = debugfs_create_file("available_filter_functions", 0444, 1563 entry = debugfs_create_file("available_filter_functions", 0444,
1244 d_tracer, NULL, &ftrace_avail_fops); 1564 d_tracer, NULL, &ftrace_avail_fops);
@@ -1263,12 +1583,20 @@ static __init int ftrace_init_debugfs(void)
1263 pr_warning("Could not create debugfs " 1583 pr_warning("Could not create debugfs "
1264 "'set_ftrace_notrace' entry\n"); 1584 "'set_ftrace_notrace' entry\n");
1265 1585
1586#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1587 entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
1588 NULL,
1589 &ftrace_graph_fops);
1590 if (!entry)
1591 pr_warning("Could not create debugfs "
1592 "'set_graph_function' entry\n");
1593#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1594
1266 return 0; 1595 return 0;
1267} 1596}
1268 1597
1269fs_initcall(ftrace_init_debugfs); 1598static int ftrace_convert_nops(struct module *mod,
1270 1599 unsigned long *start,
1271static int ftrace_convert_nops(unsigned long *start,
1272 unsigned long *end) 1600 unsigned long *end)
1273{ 1601{
1274 unsigned long *p; 1602 unsigned long *p;
@@ -1279,23 +1607,32 @@ static int ftrace_convert_nops(unsigned long *start,
1279 p = start; 1607 p = start;
1280 while (p < end) { 1608 while (p < end) {
1281 addr = ftrace_call_adjust(*p++); 1609 addr = ftrace_call_adjust(*p++);
1610 /*
1611 * Some architecture linkers will pad between
1612 * the different mcount_loc sections of different
1613 * object files to satisfy alignments.
1614 * Skip any NULL pointers.
1615 */
1616 if (!addr)
1617 continue;
1282 ftrace_record_ip(addr); 1618 ftrace_record_ip(addr);
1283 } 1619 }
1284 1620
1285 /* disable interrupts to prevent kstop machine */ 1621 /* disable interrupts to prevent kstop machine */
1286 local_irq_save(flags); 1622 local_irq_save(flags);
1287 ftrace_update_code(); 1623 ftrace_update_code(mod);
1288 local_irq_restore(flags); 1624 local_irq_restore(flags);
1289 mutex_unlock(&ftrace_start_lock); 1625 mutex_unlock(&ftrace_start_lock);
1290 1626
1291 return 0; 1627 return 0;
1292} 1628}
1293 1629
1294void ftrace_init_module(unsigned long *start, unsigned long *end) 1630void ftrace_init_module(struct module *mod,
1631 unsigned long *start, unsigned long *end)
1295{ 1632{
1296 if (ftrace_disabled || start == end) 1633 if (ftrace_disabled || start == end)
1297 return; 1634 return;
1298 ftrace_convert_nops(start, end); 1635 ftrace_convert_nops(mod, start, end);
1299} 1636}
1300 1637
1301extern unsigned long __start_mcount_loc[]; 1638extern unsigned long __start_mcount_loc[];
@@ -1325,7 +1662,8 @@ void __init ftrace_init(void)
1325 1662
1326 last_ftrace_enabled = ftrace_enabled = 1; 1663 last_ftrace_enabled = ftrace_enabled = 1;
1327 1664
1328 ret = ftrace_convert_nops(__start_mcount_loc, 1665 ret = ftrace_convert_nops(NULL,
1666 __start_mcount_loc,
1329 __stop_mcount_loc); 1667 __stop_mcount_loc);
1330 1668
1331 return; 1669 return;
@@ -1342,12 +1680,186 @@ static int __init ftrace_nodyn_init(void)
1342} 1680}
1343device_initcall(ftrace_nodyn_init); 1681device_initcall(ftrace_nodyn_init);
1344 1682
1345# define ftrace_startup() do { } while (0) 1683static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
1346# define ftrace_shutdown() do { } while (0) 1684static inline void ftrace_startup_enable(int command) { }
1685/* Keep as macros so we do not need to define the commands */
1686# define ftrace_startup(command) do { } while (0)
1687# define ftrace_shutdown(command) do { } while (0)
1347# define ftrace_startup_sysctl() do { } while (0) 1688# define ftrace_startup_sysctl() do { } while (0)
1348# define ftrace_shutdown_sysctl() do { } while (0) 1689# define ftrace_shutdown_sysctl() do { } while (0)
1349#endif /* CONFIG_DYNAMIC_FTRACE */ 1690#endif /* CONFIG_DYNAMIC_FTRACE */
1350 1691
1692static ssize_t
1693ftrace_pid_read(struct file *file, char __user *ubuf,
1694 size_t cnt, loff_t *ppos)
1695{
1696 char buf[64];
1697 int r;
1698
1699 if (ftrace_pid_trace == ftrace_swapper_pid)
1700 r = sprintf(buf, "swapper tasks\n");
1701 else if (ftrace_pid_trace)
1702 r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
1703 else
1704 r = sprintf(buf, "no pid\n");
1705
1706 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1707}
1708
1709static void clear_ftrace_swapper(void)
1710{
1711 struct task_struct *p;
1712 int cpu;
1713
1714 get_online_cpus();
1715 for_each_online_cpu(cpu) {
1716 p = idle_task(cpu);
1717 clear_tsk_trace_trace(p);
1718 }
1719 put_online_cpus();
1720}
1721
1722static void set_ftrace_swapper(void)
1723{
1724 struct task_struct *p;
1725 int cpu;
1726
1727 get_online_cpus();
1728 for_each_online_cpu(cpu) {
1729 p = idle_task(cpu);
1730 set_tsk_trace_trace(p);
1731 }
1732 put_online_cpus();
1733}
1734
1735static void clear_ftrace_pid(struct pid *pid)
1736{
1737 struct task_struct *p;
1738
1739 do_each_pid_task(pid, PIDTYPE_PID, p) {
1740 clear_tsk_trace_trace(p);
1741 } while_each_pid_task(pid, PIDTYPE_PID, p);
1742 put_pid(pid);
1743}
1744
1745static void set_ftrace_pid(struct pid *pid)
1746{
1747 struct task_struct *p;
1748
1749 do_each_pid_task(pid, PIDTYPE_PID, p) {
1750 set_tsk_trace_trace(p);
1751 } while_each_pid_task(pid, PIDTYPE_PID, p);
1752}
1753
1754static void clear_ftrace_pid_task(struct pid **pid)
1755{
1756 if (*pid == ftrace_swapper_pid)
1757 clear_ftrace_swapper();
1758 else
1759 clear_ftrace_pid(*pid);
1760
1761 *pid = NULL;
1762}
1763
1764static void set_ftrace_pid_task(struct pid *pid)
1765{
1766 if (pid == ftrace_swapper_pid)
1767 set_ftrace_swapper();
1768 else
1769 set_ftrace_pid(pid);
1770}
1771
1772static ssize_t
1773ftrace_pid_write(struct file *filp, const char __user *ubuf,
1774 size_t cnt, loff_t *ppos)
1775{
1776 struct pid *pid;
1777 char buf[64];
1778 long val;
1779 int ret;
1780
1781 if (cnt >= sizeof(buf))
1782 return -EINVAL;
1783
1784 if (copy_from_user(&buf, ubuf, cnt))
1785 return -EFAULT;
1786
1787 buf[cnt] = 0;
1788
1789 ret = strict_strtol(buf, 10, &val);
1790 if (ret < 0)
1791 return ret;
1792
1793 mutex_lock(&ftrace_start_lock);
1794 if (val < 0) {
1795 /* disable pid tracing */
1796 if (!ftrace_pid_trace)
1797 goto out;
1798
1799 clear_ftrace_pid_task(&ftrace_pid_trace);
1800
1801 } else {
1802 /* swapper task is special */
1803 if (!val) {
1804 pid = ftrace_swapper_pid;
1805 if (pid == ftrace_pid_trace)
1806 goto out;
1807 } else {
1808 pid = find_get_pid(val);
1809
1810 if (pid == ftrace_pid_trace) {
1811 put_pid(pid);
1812 goto out;
1813 }
1814 }
1815
1816 if (ftrace_pid_trace)
1817 clear_ftrace_pid_task(&ftrace_pid_trace);
1818
1819 if (!pid)
1820 goto out;
1821
1822 ftrace_pid_trace = pid;
1823
1824 set_ftrace_pid_task(ftrace_pid_trace);
1825 }
1826
1827 /* update the function call */
1828 ftrace_update_pid_func();
1829 ftrace_startup_enable(0);
1830
1831 out:
1832 mutex_unlock(&ftrace_start_lock);
1833
1834 return cnt;
1835}
1836
1837static struct file_operations ftrace_pid_fops = {
1838 .read = ftrace_pid_read,
1839 .write = ftrace_pid_write,
1840};
1841
1842static __init int ftrace_init_debugfs(void)
1843{
1844 struct dentry *d_tracer;
1845 struct dentry *entry;
1846
1847 d_tracer = tracing_init_dentry();
1848 if (!d_tracer)
1849 return 0;
1850
1851 ftrace_init_dyn_debugfs(d_tracer);
1852
1853 entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
1854 NULL, &ftrace_pid_fops);
1855 if (!entry)
1856 pr_warning("Could not create debugfs "
1857 "'set_ftrace_pid' entry\n");
1858 return 0;
1859}
1860
1861fs_initcall(ftrace_init_debugfs);
1862
1351/** 1863/**
1352 * ftrace_kill - kill ftrace 1864 * ftrace_kill - kill ftrace
1353 * 1865 *
@@ -1381,10 +1893,11 @@ int register_ftrace_function(struct ftrace_ops *ops)
1381 return -1; 1893 return -1;
1382 1894
1383 mutex_lock(&ftrace_sysctl_lock); 1895 mutex_lock(&ftrace_sysctl_lock);
1896
1384 ret = __register_ftrace_function(ops); 1897 ret = __register_ftrace_function(ops);
1385 ftrace_startup(); 1898 ftrace_startup(0);
1386 mutex_unlock(&ftrace_sysctl_lock);
1387 1899
1900 mutex_unlock(&ftrace_sysctl_lock);
1388 return ret; 1901 return ret;
1389} 1902}
1390 1903
@@ -1400,7 +1913,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
1400 1913
1401 mutex_lock(&ftrace_sysctl_lock); 1914 mutex_lock(&ftrace_sysctl_lock);
1402 ret = __unregister_ftrace_function(ops); 1915 ret = __unregister_ftrace_function(ops);
1403 ftrace_shutdown(); 1916 ftrace_shutdown(0);
1404 mutex_unlock(&ftrace_sysctl_lock); 1917 mutex_unlock(&ftrace_sysctl_lock);
1405 1918
1406 return ret; 1919 return ret;
@@ -1449,3 +1962,153 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
1449 return ret; 1962 return ret;
1450} 1963}
1451 1964
1965#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1966
1967static atomic_t ftrace_graph_active;
1968
1969int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
1970{
1971 return 0;
1972}
1973
1974/* The callbacks that hook a function */
1975trace_func_graph_ret_t ftrace_graph_return =
1976 (trace_func_graph_ret_t)ftrace_stub;
1977trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
1978
1979/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
1980static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
1981{
1982 int i;
1983 int ret = 0;
1984 unsigned long flags;
1985 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
1986 struct task_struct *g, *t;
1987
1988 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
1989 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
1990 * sizeof(struct ftrace_ret_stack),
1991 GFP_KERNEL);
1992 if (!ret_stack_list[i]) {
1993 start = 0;
1994 end = i;
1995 ret = -ENOMEM;
1996 goto free;
1997 }
1998 }
1999
2000 read_lock_irqsave(&tasklist_lock, flags);
2001 do_each_thread(g, t) {
2002 if (start == end) {
2003 ret = -EAGAIN;
2004 goto unlock;
2005 }
2006
2007 if (t->ret_stack == NULL) {
2008 t->curr_ret_stack = -1;
2009 /* Make sure IRQs see the -1 first: */
2010 barrier();
2011 t->ret_stack = ret_stack_list[start++];
2012 atomic_set(&t->tracing_graph_pause, 0);
2013 atomic_set(&t->trace_overrun, 0);
2014 }
2015 } while_each_thread(g, t);
2016
2017unlock:
2018 read_unlock_irqrestore(&tasklist_lock, flags);
2019free:
2020 for (i = start; i < end; i++)
2021 kfree(ret_stack_list[i]);
2022 return ret;
2023}
2024
2025/* Allocate a return stack for each task */
2026static int start_graph_tracing(void)
2027{
2028 struct ftrace_ret_stack **ret_stack_list;
2029 int ret;
2030
2031 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
2032 sizeof(struct ftrace_ret_stack *),
2033 GFP_KERNEL);
2034
2035 if (!ret_stack_list)
2036 return -ENOMEM;
2037
2038 do {
2039 ret = alloc_retstack_tasklist(ret_stack_list);
2040 } while (ret == -EAGAIN);
2041
2042 kfree(ret_stack_list);
2043 return ret;
2044}
2045
2046int register_ftrace_graph(trace_func_graph_ret_t retfunc,
2047 trace_func_graph_ent_t entryfunc)
2048{
2049 int ret = 0;
2050
2051 mutex_lock(&ftrace_sysctl_lock);
2052
2053 atomic_inc(&ftrace_graph_active);
2054 ret = start_graph_tracing();
2055 if (ret) {
2056 atomic_dec(&ftrace_graph_active);
2057 goto out;
2058 }
2059
2060 ftrace_graph_return = retfunc;
2061 ftrace_graph_entry = entryfunc;
2062
2063 ftrace_startup(FTRACE_START_FUNC_RET);
2064
2065out:
2066 mutex_unlock(&ftrace_sysctl_lock);
2067 return ret;
2068}
2069
2070void unregister_ftrace_graph(void)
2071{
2072 mutex_lock(&ftrace_sysctl_lock);
2073
2074 atomic_dec(&ftrace_graph_active);
2075 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
2076 ftrace_graph_entry = ftrace_graph_entry_stub;
2077 ftrace_shutdown(FTRACE_STOP_FUNC_RET);
2078
2079 mutex_unlock(&ftrace_sysctl_lock);
2080}
2081
2082/* Allocate a return stack for newly created task */
2083void ftrace_graph_init_task(struct task_struct *t)
2084{
2085 if (atomic_read(&ftrace_graph_active)) {
2086 t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
2087 * sizeof(struct ftrace_ret_stack),
2088 GFP_KERNEL);
2089 if (!t->ret_stack)
2090 return;
2091 t->curr_ret_stack = -1;
2092 atomic_set(&t->tracing_graph_pause, 0);
2093 atomic_set(&t->trace_overrun, 0);
2094 } else
2095 t->ret_stack = NULL;
2096}
2097
2098void ftrace_graph_exit_task(struct task_struct *t)
2099{
2100 struct ftrace_ret_stack *ret_stack = t->ret_stack;
2101
2102 t->ret_stack = NULL;
2103 /* NULL must become visible to IRQs before we free it: */
2104 barrier();
2105
2106 kfree(ret_stack);
2107}
2108
2109void ftrace_graph_stop(void)
2110{
2111 ftrace_stop();
2112}
2113#endif
2114
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index f780e9552f91..1d601a7c4587 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -18,8 +18,46 @@
18 18
19#include "trace.h" 19#include "trace.h"
20 20
21/* Global flag to disable all recording to ring buffers */ 21/*
22static int ring_buffers_off __read_mostly; 22 * A fast way to enable or disable all ring buffers is to
23 * call tracing_on or tracing_off. Turning off the ring buffers
24 * prevents all ring buffers from being recorded to.
25 * Turning this switch on, makes it OK to write to the
26 * ring buffer, if the ring buffer is enabled itself.
27 *
28 * There's three layers that must be on in order to write
29 * to the ring buffer.
30 *
31 * 1) This global flag must be set.
32 * 2) The ring buffer must be enabled for recording.
33 * 3) The per cpu buffer must be enabled for recording.
34 *
35 * In case of an anomaly, this global flag has a bit set that
36 * will permantly disable all ring buffers.
37 */
38
39/*
40 * Global flag to disable all recording to ring buffers
41 * This has two bits: ON, DISABLED
42 *
43 * ON DISABLED
44 * ---- ----------
45 * 0 0 : ring buffers are off
46 * 1 0 : ring buffers are on
47 * X 1 : ring buffers are permanently disabled
48 */
49
50enum {
51 RB_BUFFERS_ON_BIT = 0,
52 RB_BUFFERS_DISABLED_BIT = 1,
53};
54
55enum {
56 RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT,
57 RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
58};
59
60static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
23 61
24/** 62/**
25 * tracing_on - enable all tracing buffers 63 * tracing_on - enable all tracing buffers
@@ -29,8 +67,9 @@ static int ring_buffers_off __read_mostly;
29 */ 67 */
30void tracing_on(void) 68void tracing_on(void)
31{ 69{
32 ring_buffers_off = 0; 70 set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
33} 71}
72EXPORT_SYMBOL_GPL(tracing_on);
34 73
35/** 74/**
36 * tracing_off - turn off all tracing buffers 75 * tracing_off - turn off all tracing buffers
@@ -42,8 +81,22 @@ void tracing_on(void)
42 */ 81 */
43void tracing_off(void) 82void tracing_off(void)
44{ 83{
45 ring_buffers_off = 1; 84 clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
46} 85}
86EXPORT_SYMBOL_GPL(tracing_off);
87
88/**
89 * tracing_off_permanent - permanently disable ring buffers
90 *
91 * This function, once called, will disable all ring buffers
92 * permanenty.
93 */
94void tracing_off_permanent(void)
95{
96 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
97}
98
99#include "trace.h"
47 100
48/* Up this if you want to test the TIME_EXTENTS and normalization */ 101/* Up this if you want to test the TIME_EXTENTS and normalization */
49#define DEBUG_SHIFT 0 102#define DEBUG_SHIFT 0
@@ -56,16 +109,18 @@ u64 ring_buffer_time_stamp(int cpu)
56 preempt_disable_notrace(); 109 preempt_disable_notrace();
57 /* shift to debug/test normalization and TIME_EXTENTS */ 110 /* shift to debug/test normalization and TIME_EXTENTS */
58 time = sched_clock() << DEBUG_SHIFT; 111 time = sched_clock() << DEBUG_SHIFT;
59 preempt_enable_notrace(); 112 preempt_enable_no_resched_notrace();
60 113
61 return time; 114 return time;
62} 115}
116EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
63 117
64void ring_buffer_normalize_time_stamp(int cpu, u64 *ts) 118void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
65{ 119{
66 /* Just stupid testing the normalize function and deltas */ 120 /* Just stupid testing the normalize function and deltas */
67 *ts >>= DEBUG_SHIFT; 121 *ts >>= DEBUG_SHIFT;
68} 122}
123EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
69 124
70#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event)) 125#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
71#define RB_ALIGNMENT_SHIFT 2 126#define RB_ALIGNMENT_SHIFT 2
@@ -115,6 +170,7 @@ unsigned ring_buffer_event_length(struct ring_buffer_event *event)
115{ 170{
116 return rb_event_length(event); 171 return rb_event_length(event);
117} 172}
173EXPORT_SYMBOL_GPL(ring_buffer_event_length);
118 174
119/* inline for ring buffer fast paths */ 175/* inline for ring buffer fast paths */
120static inline void * 176static inline void *
@@ -136,6 +192,7 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
136{ 192{
137 return rb_event_data(event); 193 return rb_event_data(event);
138} 194}
195EXPORT_SYMBOL_GPL(ring_buffer_event_data);
139 196
140#define for_each_buffer_cpu(buffer, cpu) \ 197#define for_each_buffer_cpu(buffer, cpu) \
141 for_each_cpu_mask(cpu, buffer->cpumask) 198 for_each_cpu_mask(cpu, buffer->cpumask)
@@ -144,20 +201,24 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
144#define TS_MASK ((1ULL << TS_SHIFT) - 1) 201#define TS_MASK ((1ULL << TS_SHIFT) - 1)
145#define TS_DELTA_TEST (~TS_MASK) 202#define TS_DELTA_TEST (~TS_MASK)
146 203
147/* 204struct buffer_data_page {
148 * This hack stolen from mm/slob.c.
149 * We can store per page timing information in the page frame of the page.
150 * Thanks to Peter Zijlstra for suggesting this idea.
151 */
152struct buffer_page {
153 u64 time_stamp; /* page time stamp */ 205 u64 time_stamp; /* page time stamp */
154 local_t write; /* index for next write */
155 local_t commit; /* write commited index */ 206 local_t commit; /* write commited index */
207 unsigned char data[]; /* data of buffer page */
208};
209
210struct buffer_page {
211 local_t write; /* index for next write */
156 unsigned read; /* index for next read */ 212 unsigned read; /* index for next read */
157 struct list_head list; /* list of free pages */ 213 struct list_head list; /* list of free pages */
158 void *page; /* Actual data page */ 214 struct buffer_data_page *page; /* Actual data page */
159}; 215};
160 216
217static void rb_init_page(struct buffer_data_page *bpage)
218{
219 local_set(&bpage->commit, 0);
220}
221
161/* 222/*
162 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing 223 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
163 * this issue out. 224 * this issue out.
@@ -179,7 +240,7 @@ static inline int test_time_stamp(u64 delta)
179 return 0; 240 return 0;
180} 241}
181 242
182#define BUF_PAGE_SIZE PAGE_SIZE 243#define BUF_PAGE_SIZE (PAGE_SIZE - sizeof(struct buffer_data_page))
183 244
184/* 245/*
185 * head_page == tail_page && head == tail then buffer is empty. 246 * head_page == tail_page && head == tail then buffer is empty.
@@ -187,7 +248,8 @@ static inline int test_time_stamp(u64 delta)
187struct ring_buffer_per_cpu { 248struct ring_buffer_per_cpu {
188 int cpu; 249 int cpu;
189 struct ring_buffer *buffer; 250 struct ring_buffer *buffer;
190 spinlock_t lock; 251 spinlock_t reader_lock; /* serialize readers */
252 raw_spinlock_t lock;
191 struct lock_class_key lock_key; 253 struct lock_class_key lock_key;
192 struct list_head pages; 254 struct list_head pages;
193 struct buffer_page *head_page; /* read from head */ 255 struct buffer_page *head_page; /* read from head */
@@ -202,7 +264,6 @@ struct ring_buffer_per_cpu {
202}; 264};
203 265
204struct ring_buffer { 266struct ring_buffer {
205 unsigned long size;
206 unsigned pages; 267 unsigned pages;
207 unsigned flags; 268 unsigned flags;
208 int cpus; 269 int cpus;
@@ -221,32 +282,16 @@ struct ring_buffer_iter {
221 u64 read_stamp; 282 u64 read_stamp;
222}; 283};
223 284
285/* buffer may be either ring_buffer or ring_buffer_per_cpu */
224#define RB_WARN_ON(buffer, cond) \ 286#define RB_WARN_ON(buffer, cond) \
225 do { \ 287 ({ \
226 if (unlikely(cond)) { \ 288 int _____ret = unlikely(cond); \
227 atomic_inc(&buffer->record_disabled); \ 289 if (_____ret) { \
228 WARN_ON(1); \
229 } \
230 } while (0)
231
232#define RB_WARN_ON_RET(buffer, cond) \
233 do { \
234 if (unlikely(cond)) { \
235 atomic_inc(&buffer->record_disabled); \
236 WARN_ON(1); \
237 return -1; \
238 } \
239 } while (0)
240
241#define RB_WARN_ON_ONCE(buffer, cond) \
242 do { \
243 static int once; \
244 if (unlikely(cond) && !once) { \
245 once++; \
246 atomic_inc(&buffer->record_disabled); \ 290 atomic_inc(&buffer->record_disabled); \
247 WARN_ON(1); \ 291 WARN_ON(1); \
248 } \ 292 } \
249 } while (0) 293 _____ret; \
294 })
250 295
251/** 296/**
252 * check_pages - integrity check of buffer pages 297 * check_pages - integrity check of buffer pages
@@ -258,16 +303,20 @@ struct ring_buffer_iter {
258static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) 303static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
259{ 304{
260 struct list_head *head = &cpu_buffer->pages; 305 struct list_head *head = &cpu_buffer->pages;
261 struct buffer_page *page, *tmp; 306 struct buffer_page *bpage, *tmp;
262 307
263 RB_WARN_ON_RET(cpu_buffer, head->next->prev != head); 308 if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
264 RB_WARN_ON_RET(cpu_buffer, head->prev->next != head); 309 return -1;
310 if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
311 return -1;
265 312
266 list_for_each_entry_safe(page, tmp, head, list) { 313 list_for_each_entry_safe(bpage, tmp, head, list) {
267 RB_WARN_ON_RET(cpu_buffer, 314 if (RB_WARN_ON(cpu_buffer,
268 page->list.next->prev != &page->list); 315 bpage->list.next->prev != &bpage->list))
269 RB_WARN_ON_RET(cpu_buffer, 316 return -1;
270 page->list.prev->next != &page->list); 317 if (RB_WARN_ON(cpu_buffer,
318 bpage->list.prev->next != &bpage->list))
319 return -1;
271 } 320 }
272 321
273 return 0; 322 return 0;
@@ -277,22 +326,23 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
277 unsigned nr_pages) 326 unsigned nr_pages)
278{ 327{
279 struct list_head *head = &cpu_buffer->pages; 328 struct list_head *head = &cpu_buffer->pages;
280 struct buffer_page *page, *tmp; 329 struct buffer_page *bpage, *tmp;
281 unsigned long addr; 330 unsigned long addr;
282 LIST_HEAD(pages); 331 LIST_HEAD(pages);
283 unsigned i; 332 unsigned i;
284 333
285 for (i = 0; i < nr_pages; i++) { 334 for (i = 0; i < nr_pages; i++) {
286 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()), 335 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
287 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu)); 336 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
288 if (!page) 337 if (!bpage)
289 goto free_pages; 338 goto free_pages;
290 list_add(&page->list, &pages); 339 list_add(&bpage->list, &pages);
291 340
292 addr = __get_free_page(GFP_KERNEL); 341 addr = __get_free_page(GFP_KERNEL);
293 if (!addr) 342 if (!addr)
294 goto free_pages; 343 goto free_pages;
295 page->page = (void *)addr; 344 bpage->page = (void *)addr;
345 rb_init_page(bpage->page);
296 } 346 }
297 347
298 list_splice(&pages, head); 348 list_splice(&pages, head);
@@ -302,9 +352,9 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
302 return 0; 352 return 0;
303 353
304 free_pages: 354 free_pages:
305 list_for_each_entry_safe(page, tmp, &pages, list) { 355 list_for_each_entry_safe(bpage, tmp, &pages, list) {
306 list_del_init(&page->list); 356 list_del_init(&bpage->list);
307 free_buffer_page(page); 357 free_buffer_page(bpage);
308 } 358 }
309 return -ENOMEM; 359 return -ENOMEM;
310} 360}
@@ -313,7 +363,7 @@ static struct ring_buffer_per_cpu *
313rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) 363rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
314{ 364{
315 struct ring_buffer_per_cpu *cpu_buffer; 365 struct ring_buffer_per_cpu *cpu_buffer;
316 struct buffer_page *page; 366 struct buffer_page *bpage;
317 unsigned long addr; 367 unsigned long addr;
318 int ret; 368 int ret;
319 369
@@ -324,19 +374,21 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
324 374
325 cpu_buffer->cpu = cpu; 375 cpu_buffer->cpu = cpu;
326 cpu_buffer->buffer = buffer; 376 cpu_buffer->buffer = buffer;
327 spin_lock_init(&cpu_buffer->lock); 377 spin_lock_init(&cpu_buffer->reader_lock);
378 cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
328 INIT_LIST_HEAD(&cpu_buffer->pages); 379 INIT_LIST_HEAD(&cpu_buffer->pages);
329 380
330 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()), 381 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
331 GFP_KERNEL, cpu_to_node(cpu)); 382 GFP_KERNEL, cpu_to_node(cpu));
332 if (!page) 383 if (!bpage)
333 goto fail_free_buffer; 384 goto fail_free_buffer;
334 385
335 cpu_buffer->reader_page = page; 386 cpu_buffer->reader_page = bpage;
336 addr = __get_free_page(GFP_KERNEL); 387 addr = __get_free_page(GFP_KERNEL);
337 if (!addr) 388 if (!addr)
338 goto fail_free_reader; 389 goto fail_free_reader;
339 page->page = (void *)addr; 390 bpage->page = (void *)addr;
391 rb_init_page(bpage->page);
340 392
341 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 393 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
342 394
@@ -361,14 +413,14 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
361static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) 413static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
362{ 414{
363 struct list_head *head = &cpu_buffer->pages; 415 struct list_head *head = &cpu_buffer->pages;
364 struct buffer_page *page, *tmp; 416 struct buffer_page *bpage, *tmp;
365 417
366 list_del_init(&cpu_buffer->reader_page->list); 418 list_del_init(&cpu_buffer->reader_page->list);
367 free_buffer_page(cpu_buffer->reader_page); 419 free_buffer_page(cpu_buffer->reader_page);
368 420
369 list_for_each_entry_safe(page, tmp, head, list) { 421 list_for_each_entry_safe(bpage, tmp, head, list) {
370 list_del_init(&page->list); 422 list_del_init(&bpage->list);
371 free_buffer_page(page); 423 free_buffer_page(bpage);
372 } 424 }
373 kfree(cpu_buffer); 425 kfree(cpu_buffer);
374} 426}
@@ -381,7 +433,7 @@ extern int ring_buffer_page_too_big(void);
381 433
382/** 434/**
383 * ring_buffer_alloc - allocate a new ring_buffer 435 * ring_buffer_alloc - allocate a new ring_buffer
384 * @size: the size in bytes that is needed. 436 * @size: the size in bytes per cpu that is needed.
385 * @flags: attributes to set for the ring buffer. 437 * @flags: attributes to set for the ring buffer.
386 * 438 *
387 * Currently the only flag that is available is the RB_FL_OVERWRITE 439 * Currently the only flag that is available is the RB_FL_OVERWRITE
@@ -444,6 +496,7 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
444 kfree(buffer); 496 kfree(buffer);
445 return NULL; 497 return NULL;
446} 498}
499EXPORT_SYMBOL_GPL(ring_buffer_alloc);
447 500
448/** 501/**
449 * ring_buffer_free - free a ring buffer. 502 * ring_buffer_free - free a ring buffer.
@@ -459,13 +512,14 @@ ring_buffer_free(struct ring_buffer *buffer)
459 512
460 kfree(buffer); 513 kfree(buffer);
461} 514}
515EXPORT_SYMBOL_GPL(ring_buffer_free);
462 516
463static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer); 517static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
464 518
465static void 519static void
466rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) 520rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
467{ 521{
468 struct buffer_page *page; 522 struct buffer_page *bpage;
469 struct list_head *p; 523 struct list_head *p;
470 unsigned i; 524 unsigned i;
471 525
@@ -473,13 +527,15 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
473 synchronize_sched(); 527 synchronize_sched();
474 528
475 for (i = 0; i < nr_pages; i++) { 529 for (i = 0; i < nr_pages; i++) {
476 BUG_ON(list_empty(&cpu_buffer->pages)); 530 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
531 return;
477 p = cpu_buffer->pages.next; 532 p = cpu_buffer->pages.next;
478 page = list_entry(p, struct buffer_page, list); 533 bpage = list_entry(p, struct buffer_page, list);
479 list_del_init(&page->list); 534 list_del_init(&bpage->list);
480 free_buffer_page(page); 535 free_buffer_page(bpage);
481 } 536 }
482 BUG_ON(list_empty(&cpu_buffer->pages)); 537 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
538 return;
483 539
484 rb_reset_cpu(cpu_buffer); 540 rb_reset_cpu(cpu_buffer);
485 541
@@ -493,7 +549,7 @@ static void
493rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer, 549rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
494 struct list_head *pages, unsigned nr_pages) 550 struct list_head *pages, unsigned nr_pages)
495{ 551{
496 struct buffer_page *page; 552 struct buffer_page *bpage;
497 struct list_head *p; 553 struct list_head *p;
498 unsigned i; 554 unsigned i;
499 555
@@ -501,11 +557,12 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
501 synchronize_sched(); 557 synchronize_sched();
502 558
503 for (i = 0; i < nr_pages; i++) { 559 for (i = 0; i < nr_pages; i++) {
504 BUG_ON(list_empty(pages)); 560 if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
561 return;
505 p = pages->next; 562 p = pages->next;
506 page = list_entry(p, struct buffer_page, list); 563 bpage = list_entry(p, struct buffer_page, list);
507 list_del_init(&page->list); 564 list_del_init(&bpage->list);
508 list_add_tail(&page->list, &cpu_buffer->pages); 565 list_add_tail(&bpage->list, &cpu_buffer->pages);
509 } 566 }
510 rb_reset_cpu(cpu_buffer); 567 rb_reset_cpu(cpu_buffer);
511 568
@@ -532,7 +589,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
532{ 589{
533 struct ring_buffer_per_cpu *cpu_buffer; 590 struct ring_buffer_per_cpu *cpu_buffer;
534 unsigned nr_pages, rm_pages, new_pages; 591 unsigned nr_pages, rm_pages, new_pages;
535 struct buffer_page *page, *tmp; 592 struct buffer_page *bpage, *tmp;
536 unsigned long buffer_size; 593 unsigned long buffer_size;
537 unsigned long addr; 594 unsigned long addr;
538 LIST_HEAD(pages); 595 LIST_HEAD(pages);
@@ -562,7 +619,10 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
562 if (size < buffer_size) { 619 if (size < buffer_size) {
563 620
564 /* easy case, just free pages */ 621 /* easy case, just free pages */
565 BUG_ON(nr_pages >= buffer->pages); 622 if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) {
623 mutex_unlock(&buffer->mutex);
624 return -1;
625 }
566 626
567 rm_pages = buffer->pages - nr_pages; 627 rm_pages = buffer->pages - nr_pages;
568 628
@@ -581,21 +641,26 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
581 * add these pages to the cpu_buffers. Otherwise we just free 641 * add these pages to the cpu_buffers. Otherwise we just free
582 * them all and return -ENOMEM; 642 * them all and return -ENOMEM;
583 */ 643 */
584 BUG_ON(nr_pages <= buffer->pages); 644 if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) {
645 mutex_unlock(&buffer->mutex);
646 return -1;
647 }
648
585 new_pages = nr_pages - buffer->pages; 649 new_pages = nr_pages - buffer->pages;
586 650
587 for_each_buffer_cpu(buffer, cpu) { 651 for_each_buffer_cpu(buffer, cpu) {
588 for (i = 0; i < new_pages; i++) { 652 for (i = 0; i < new_pages; i++) {
589 page = kzalloc_node(ALIGN(sizeof(*page), 653 bpage = kzalloc_node(ALIGN(sizeof(*bpage),
590 cache_line_size()), 654 cache_line_size()),
591 GFP_KERNEL, cpu_to_node(cpu)); 655 GFP_KERNEL, cpu_to_node(cpu));
592 if (!page) 656 if (!bpage)
593 goto free_pages; 657 goto free_pages;
594 list_add(&page->list, &pages); 658 list_add(&bpage->list, &pages);
595 addr = __get_free_page(GFP_KERNEL); 659 addr = __get_free_page(GFP_KERNEL);
596 if (!addr) 660 if (!addr)
597 goto free_pages; 661 goto free_pages;
598 page->page = (void *)addr; 662 bpage->page = (void *)addr;
663 rb_init_page(bpage->page);
599 } 664 }
600 } 665 }
601 666
@@ -604,7 +669,10 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
604 rb_insert_pages(cpu_buffer, &pages, new_pages); 669 rb_insert_pages(cpu_buffer, &pages, new_pages);
605 } 670 }
606 671
607 BUG_ON(!list_empty(&pages)); 672 if (RB_WARN_ON(buffer, !list_empty(&pages))) {
673 mutex_unlock(&buffer->mutex);
674 return -1;
675 }
608 676
609 out: 677 out:
610 buffer->pages = nr_pages; 678 buffer->pages = nr_pages;
@@ -613,22 +681,29 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
613 return size; 681 return size;
614 682
615 free_pages: 683 free_pages:
616 list_for_each_entry_safe(page, tmp, &pages, list) { 684 list_for_each_entry_safe(bpage, tmp, &pages, list) {
617 list_del_init(&page->list); 685 list_del_init(&bpage->list);
618 free_buffer_page(page); 686 free_buffer_page(bpage);
619 } 687 }
620 mutex_unlock(&buffer->mutex); 688 mutex_unlock(&buffer->mutex);
621 return -ENOMEM; 689 return -ENOMEM;
622} 690}
691EXPORT_SYMBOL_GPL(ring_buffer_resize);
623 692
624static inline int rb_null_event(struct ring_buffer_event *event) 693static inline int rb_null_event(struct ring_buffer_event *event)
625{ 694{
626 return event->type == RINGBUF_TYPE_PADDING; 695 return event->type == RINGBUF_TYPE_PADDING;
627} 696}
628 697
629static inline void *__rb_page_index(struct buffer_page *page, unsigned index) 698static inline void *
699__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
700{
701 return bpage->data + index;
702}
703
704static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
630{ 705{
631 return page->page + index; 706 return bpage->page->data + index;
632} 707}
633 708
634static inline struct ring_buffer_event * 709static inline struct ring_buffer_event *
@@ -658,7 +733,7 @@ static inline unsigned rb_page_write(struct buffer_page *bpage)
658 733
659static inline unsigned rb_page_commit(struct buffer_page *bpage) 734static inline unsigned rb_page_commit(struct buffer_page *bpage)
660{ 735{
661 return local_read(&bpage->commit); 736 return local_read(&bpage->page->commit);
662} 737}
663 738
664/* Size is determined by what has been commited */ 739/* Size is determined by what has been commited */
@@ -693,7 +768,8 @@ static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
693 head += rb_event_length(event)) { 768 head += rb_event_length(event)) {
694 769
695 event = __rb_page_index(cpu_buffer->head_page, head); 770 event = __rb_page_index(cpu_buffer->head_page, head);
696 BUG_ON(rb_null_event(event)); 771 if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
772 return;
697 /* Only count data entries */ 773 /* Only count data entries */
698 if (event->type != RINGBUF_TYPE_DATA) 774 if (event->type != RINGBUF_TYPE_DATA)
699 continue; 775 continue;
@@ -703,14 +779,14 @@ static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
703} 779}
704 780
705static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer, 781static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
706 struct buffer_page **page) 782 struct buffer_page **bpage)
707{ 783{
708 struct list_head *p = (*page)->list.next; 784 struct list_head *p = (*bpage)->list.next;
709 785
710 if (p == &cpu_buffer->pages) 786 if (p == &cpu_buffer->pages)
711 p = p->next; 787 p = p->next;
712 788
713 *page = list_entry(p, struct buffer_page, list); 789 *bpage = list_entry(p, struct buffer_page, list);
714} 790}
715 791
716static inline unsigned 792static inline unsigned
@@ -746,16 +822,18 @@ rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
746 addr &= PAGE_MASK; 822 addr &= PAGE_MASK;
747 823
748 while (cpu_buffer->commit_page->page != (void *)addr) { 824 while (cpu_buffer->commit_page->page != (void *)addr) {
749 RB_WARN_ON(cpu_buffer, 825 if (RB_WARN_ON(cpu_buffer,
750 cpu_buffer->commit_page == cpu_buffer->tail_page); 826 cpu_buffer->commit_page == cpu_buffer->tail_page))
751 cpu_buffer->commit_page->commit = 827 return;
828 cpu_buffer->commit_page->page->commit =
752 cpu_buffer->commit_page->write; 829 cpu_buffer->commit_page->write;
753 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); 830 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
754 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp; 831 cpu_buffer->write_stamp =
832 cpu_buffer->commit_page->page->time_stamp;
755 } 833 }
756 834
757 /* Now set the commit to the event's index */ 835 /* Now set the commit to the event's index */
758 local_set(&cpu_buffer->commit_page->commit, index); 836 local_set(&cpu_buffer->commit_page->page->commit, index);
759} 837}
760 838
761static inline void 839static inline void
@@ -769,25 +847,38 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
769 * back to us). This allows us to do a simple loop to 847 * back to us). This allows us to do a simple loop to
770 * assign the commit to the tail. 848 * assign the commit to the tail.
771 */ 849 */
850 again:
772 while (cpu_buffer->commit_page != cpu_buffer->tail_page) { 851 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
773 cpu_buffer->commit_page->commit = 852 cpu_buffer->commit_page->page->commit =
774 cpu_buffer->commit_page->write; 853 cpu_buffer->commit_page->write;
775 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); 854 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
776 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp; 855 cpu_buffer->write_stamp =
856 cpu_buffer->commit_page->page->time_stamp;
777 /* add barrier to keep gcc from optimizing too much */ 857 /* add barrier to keep gcc from optimizing too much */
778 barrier(); 858 barrier();
779 } 859 }
780 while (rb_commit_index(cpu_buffer) != 860 while (rb_commit_index(cpu_buffer) !=
781 rb_page_write(cpu_buffer->commit_page)) { 861 rb_page_write(cpu_buffer->commit_page)) {
782 cpu_buffer->commit_page->commit = 862 cpu_buffer->commit_page->page->commit =
783 cpu_buffer->commit_page->write; 863 cpu_buffer->commit_page->write;
784 barrier(); 864 barrier();
785 } 865 }
866
867 /* again, keep gcc from optimizing */
868 barrier();
869
870 /*
871 * If an interrupt came in just after the first while loop
872 * and pushed the tail page forward, we will be left with
873 * a dangling commit that will never go forward.
874 */
875 if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
876 goto again;
786} 877}
787 878
788static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) 879static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
789{ 880{
790 cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp; 881 cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
791 cpu_buffer->reader_page->read = 0; 882 cpu_buffer->reader_page->read = 0;
792} 883}
793 884
@@ -806,7 +897,7 @@ static inline void rb_inc_iter(struct ring_buffer_iter *iter)
806 else 897 else
807 rb_inc_page(cpu_buffer, &iter->head_page); 898 rb_inc_page(cpu_buffer, &iter->head_page);
808 899
809 iter->read_stamp = iter->head_page->time_stamp; 900 iter->read_stamp = iter->head_page->page->time_stamp;
810 iter->head = 0; 901 iter->head = 0;
811} 902}
812 903
@@ -880,12 +971,15 @@ static struct ring_buffer_event *
880__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, 971__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
881 unsigned type, unsigned long length, u64 *ts) 972 unsigned type, unsigned long length, u64 *ts)
882{ 973{
883 struct buffer_page *tail_page, *head_page, *reader_page; 974 struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
884 unsigned long tail, write; 975 unsigned long tail, write;
885 struct ring_buffer *buffer = cpu_buffer->buffer; 976 struct ring_buffer *buffer = cpu_buffer->buffer;
886 struct ring_buffer_event *event; 977 struct ring_buffer_event *event;
887 unsigned long flags; 978 unsigned long flags;
888 979
980 commit_page = cpu_buffer->commit_page;
981 /* we just need to protect against interrupts */
982 barrier();
889 tail_page = cpu_buffer->tail_page; 983 tail_page = cpu_buffer->tail_page;
890 write = local_add_return(length, &tail_page->write); 984 write = local_add_return(length, &tail_page->write);
891 tail = write - length; 985 tail = write - length;
@@ -894,7 +988,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
894 if (write > BUF_PAGE_SIZE) { 988 if (write > BUF_PAGE_SIZE) {
895 struct buffer_page *next_page = tail_page; 989 struct buffer_page *next_page = tail_page;
896 990
897 spin_lock_irqsave(&cpu_buffer->lock, flags); 991 local_irq_save(flags);
992 __raw_spin_lock(&cpu_buffer->lock);
898 993
899 rb_inc_page(cpu_buffer, &next_page); 994 rb_inc_page(cpu_buffer, &next_page);
900 995
@@ -902,14 +997,15 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
902 reader_page = cpu_buffer->reader_page; 997 reader_page = cpu_buffer->reader_page;
903 998
904 /* we grabbed the lock before incrementing */ 999 /* we grabbed the lock before incrementing */
905 RB_WARN_ON(cpu_buffer, next_page == reader_page); 1000 if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
1001 goto out_unlock;
906 1002
907 /* 1003 /*
908 * If for some reason, we had an interrupt storm that made 1004 * If for some reason, we had an interrupt storm that made
909 * it all the way around the buffer, bail, and warn 1005 * it all the way around the buffer, bail, and warn
910 * about it. 1006 * about it.
911 */ 1007 */
912 if (unlikely(next_page == cpu_buffer->commit_page)) { 1008 if (unlikely(next_page == commit_page)) {
913 WARN_ON_ONCE(1); 1009 WARN_ON_ONCE(1);
914 goto out_unlock; 1010 goto out_unlock;
915 } 1011 }
@@ -940,12 +1036,12 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
940 */ 1036 */
941 if (tail_page == cpu_buffer->tail_page) { 1037 if (tail_page == cpu_buffer->tail_page) {
942 local_set(&next_page->write, 0); 1038 local_set(&next_page->write, 0);
943 local_set(&next_page->commit, 0); 1039 local_set(&next_page->page->commit, 0);
944 cpu_buffer->tail_page = next_page; 1040 cpu_buffer->tail_page = next_page;
945 1041
946 /* reread the time stamp */ 1042 /* reread the time stamp */
947 *ts = ring_buffer_time_stamp(cpu_buffer->cpu); 1043 *ts = ring_buffer_time_stamp(cpu_buffer->cpu);
948 cpu_buffer->tail_page->time_stamp = *ts; 1044 cpu_buffer->tail_page->page->time_stamp = *ts;
949 } 1045 }
950 1046
951 /* 1047 /*
@@ -970,7 +1066,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
970 rb_set_commit_to_write(cpu_buffer); 1066 rb_set_commit_to_write(cpu_buffer);
971 } 1067 }
972 1068
973 spin_unlock_irqrestore(&cpu_buffer->lock, flags); 1069 __raw_spin_unlock(&cpu_buffer->lock);
1070 local_irq_restore(flags);
974 1071
975 /* fail and let the caller try again */ 1072 /* fail and let the caller try again */
976 return ERR_PTR(-EAGAIN); 1073 return ERR_PTR(-EAGAIN);
@@ -978,7 +1075,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
978 1075
979 /* We reserved something on the buffer */ 1076 /* We reserved something on the buffer */
980 1077
981 BUG_ON(write > BUF_PAGE_SIZE); 1078 if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
1079 return NULL;
982 1080
983 event = __rb_page_index(tail_page, tail); 1081 event = __rb_page_index(tail_page, tail);
984 rb_update_event(event, type, length); 1082 rb_update_event(event, type, length);
@@ -988,12 +1086,13 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
988 * this page's time stamp. 1086 * this page's time stamp.
989 */ 1087 */
990 if (!tail && rb_is_commit(cpu_buffer, event)) 1088 if (!tail && rb_is_commit(cpu_buffer, event))
991 cpu_buffer->commit_page->time_stamp = *ts; 1089 cpu_buffer->commit_page->page->time_stamp = *ts;
992 1090
993 return event; 1091 return event;
994 1092
995 out_unlock: 1093 out_unlock:
996 spin_unlock_irqrestore(&cpu_buffer->lock, flags); 1094 __raw_spin_unlock(&cpu_buffer->lock);
1095 local_irq_restore(flags);
997 return NULL; 1096 return NULL;
998} 1097}
999 1098
@@ -1038,7 +1137,7 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1038 event->time_delta = *delta & TS_MASK; 1137 event->time_delta = *delta & TS_MASK;
1039 event->array[0] = *delta >> TS_SHIFT; 1138 event->array[0] = *delta >> TS_SHIFT;
1040 } else { 1139 } else {
1041 cpu_buffer->commit_page->time_stamp = *ts; 1140 cpu_buffer->commit_page->page->time_stamp = *ts;
1042 event->time_delta = 0; 1141 event->time_delta = 0;
1043 event->array[0] = 0; 1142 event->array[0] = 0;
1044 } 1143 }
@@ -1076,10 +1175,8 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1076 * storm or we have something buggy. 1175 * storm or we have something buggy.
1077 * Bail! 1176 * Bail!
1078 */ 1177 */
1079 if (unlikely(++nr_loops > 1000)) { 1178 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
1080 RB_WARN_ON(cpu_buffer, 1);
1081 return NULL; 1179 return NULL;
1082 }
1083 1180
1084 ts = ring_buffer_time_stamp(cpu_buffer->cpu); 1181 ts = ring_buffer_time_stamp(cpu_buffer->cpu);
1085 1182
@@ -1175,15 +1272,14 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
1175 struct ring_buffer_event *event; 1272 struct ring_buffer_event *event;
1176 int cpu, resched; 1273 int cpu, resched;
1177 1274
1178 if (ring_buffers_off) 1275 if (ring_buffer_flags != RB_BUFFERS_ON)
1179 return NULL; 1276 return NULL;
1180 1277
1181 if (atomic_read(&buffer->record_disabled)) 1278 if (atomic_read(&buffer->record_disabled))
1182 return NULL; 1279 return NULL;
1183 1280
1184 /* If we are tracing schedule, we don't want to recurse */ 1281 /* If we are tracing schedule, we don't want to recurse */
1185 resched = need_resched(); 1282 resched = ftrace_preempt_disable();
1186 preempt_disable_notrace();
1187 1283
1188 cpu = raw_smp_processor_id(); 1284 cpu = raw_smp_processor_id();
1189 1285
@@ -1214,12 +1310,10 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
1214 return event; 1310 return event;
1215 1311
1216 out: 1312 out:
1217 if (resched) 1313 ftrace_preempt_enable(resched);
1218 preempt_enable_notrace();
1219 else
1220 preempt_enable_notrace();
1221 return NULL; 1314 return NULL;
1222} 1315}
1316EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
1223 1317
1224static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, 1318static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1225 struct ring_buffer_event *event) 1319 struct ring_buffer_event *event)
@@ -1259,16 +1353,14 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1259 /* 1353 /*
1260 * Only the last preempt count needs to restore preemption. 1354 * Only the last preempt count needs to restore preemption.
1261 */ 1355 */
1262 if (preempt_count() == 1) { 1356 if (preempt_count() == 1)
1263 if (per_cpu(rb_need_resched, cpu)) 1357 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1264 preempt_enable_no_resched_notrace(); 1358 else
1265 else
1266 preempt_enable_notrace();
1267 } else
1268 preempt_enable_no_resched_notrace(); 1359 preempt_enable_no_resched_notrace();
1269 1360
1270 return 0; 1361 return 0;
1271} 1362}
1363EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
1272 1364
1273/** 1365/**
1274 * ring_buffer_write - write data to the buffer without reserving 1366 * ring_buffer_write - write data to the buffer without reserving
@@ -1294,14 +1386,13 @@ int ring_buffer_write(struct ring_buffer *buffer,
1294 int ret = -EBUSY; 1386 int ret = -EBUSY;
1295 int cpu, resched; 1387 int cpu, resched;
1296 1388
1297 if (ring_buffers_off) 1389 if (ring_buffer_flags != RB_BUFFERS_ON)
1298 return -EBUSY; 1390 return -EBUSY;
1299 1391
1300 if (atomic_read(&buffer->record_disabled)) 1392 if (atomic_read(&buffer->record_disabled))
1301 return -EBUSY; 1393 return -EBUSY;
1302 1394
1303 resched = need_resched(); 1395 resched = ftrace_preempt_disable();
1304 preempt_disable_notrace();
1305 1396
1306 cpu = raw_smp_processor_id(); 1397 cpu = raw_smp_processor_id();
1307 1398
@@ -1327,13 +1418,11 @@ int ring_buffer_write(struct ring_buffer *buffer,
1327 1418
1328 ret = 0; 1419 ret = 0;
1329 out: 1420 out:
1330 if (resched) 1421 ftrace_preempt_enable(resched);
1331 preempt_enable_no_resched_notrace();
1332 else
1333 preempt_enable_notrace();
1334 1422
1335 return ret; 1423 return ret;
1336} 1424}
1425EXPORT_SYMBOL_GPL(ring_buffer_write);
1337 1426
1338static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) 1427static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1339{ 1428{
@@ -1360,6 +1449,7 @@ void ring_buffer_record_disable(struct ring_buffer *buffer)
1360{ 1449{
1361 atomic_inc(&buffer->record_disabled); 1450 atomic_inc(&buffer->record_disabled);
1362} 1451}
1452EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
1363 1453
1364/** 1454/**
1365 * ring_buffer_record_enable - enable writes to the buffer 1455 * ring_buffer_record_enable - enable writes to the buffer
@@ -1372,6 +1462,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer)
1372{ 1462{
1373 atomic_dec(&buffer->record_disabled); 1463 atomic_dec(&buffer->record_disabled);
1374} 1464}
1465EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
1375 1466
1376/** 1467/**
1377 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer 1468 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
@@ -1393,6 +1484,7 @@ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1393 cpu_buffer = buffer->buffers[cpu]; 1484 cpu_buffer = buffer->buffers[cpu];
1394 atomic_inc(&cpu_buffer->record_disabled); 1485 atomic_inc(&cpu_buffer->record_disabled);
1395} 1486}
1487EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
1396 1488
1397/** 1489/**
1398 * ring_buffer_record_enable_cpu - enable writes to the buffer 1490 * ring_buffer_record_enable_cpu - enable writes to the buffer
@@ -1412,6 +1504,7 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1412 cpu_buffer = buffer->buffers[cpu]; 1504 cpu_buffer = buffer->buffers[cpu];
1413 atomic_dec(&cpu_buffer->record_disabled); 1505 atomic_dec(&cpu_buffer->record_disabled);
1414} 1506}
1507EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
1415 1508
1416/** 1509/**
1417 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer 1510 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
@@ -1428,6 +1521,7 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1428 cpu_buffer = buffer->buffers[cpu]; 1521 cpu_buffer = buffer->buffers[cpu];
1429 return cpu_buffer->entries; 1522 return cpu_buffer->entries;
1430} 1523}
1524EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
1431 1525
1432/** 1526/**
1433 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer 1527 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
@@ -1444,6 +1538,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1444 cpu_buffer = buffer->buffers[cpu]; 1538 cpu_buffer = buffer->buffers[cpu];
1445 return cpu_buffer->overrun; 1539 return cpu_buffer->overrun;
1446} 1540}
1541EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
1447 1542
1448/** 1543/**
1449 * ring_buffer_entries - get the number of entries in a buffer 1544 * ring_buffer_entries - get the number of entries in a buffer
@@ -1466,6 +1561,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1466 1561
1467 return entries; 1562 return entries;
1468} 1563}
1564EXPORT_SYMBOL_GPL(ring_buffer_entries);
1469 1565
1470/** 1566/**
1471 * ring_buffer_overrun_cpu - get the number of overruns in buffer 1567 * ring_buffer_overrun_cpu - get the number of overruns in buffer
@@ -1488,15 +1584,9 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1488 1584
1489 return overruns; 1585 return overruns;
1490} 1586}
1587EXPORT_SYMBOL_GPL(ring_buffer_overruns);
1491 1588
1492/** 1589static void rb_iter_reset(struct ring_buffer_iter *iter)
1493 * ring_buffer_iter_reset - reset an iterator
1494 * @iter: The iterator to reset
1495 *
1496 * Resets the iterator, so that it will start from the beginning
1497 * again.
1498 */
1499void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1500{ 1590{
1501 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 1591 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1502 1592
@@ -1511,10 +1601,28 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1511 if (iter->head) 1601 if (iter->head)
1512 iter->read_stamp = cpu_buffer->read_stamp; 1602 iter->read_stamp = cpu_buffer->read_stamp;
1513 else 1603 else
1514 iter->read_stamp = iter->head_page->time_stamp; 1604 iter->read_stamp = iter->head_page->page->time_stamp;
1515} 1605}
1516 1606
1517/** 1607/**
1608 * ring_buffer_iter_reset - reset an iterator
1609 * @iter: The iterator to reset
1610 *
1611 * Resets the iterator, so that it will start from the beginning
1612 * again.
1613 */
1614void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1615{
1616 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1617 unsigned long flags;
1618
1619 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1620 rb_iter_reset(iter);
1621 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1622}
1623EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
1624
1625/**
1518 * ring_buffer_iter_empty - check if an iterator has no more to read 1626 * ring_buffer_iter_empty - check if an iterator has no more to read
1519 * @iter: The iterator to check 1627 * @iter: The iterator to check
1520 */ 1628 */
@@ -1527,6 +1635,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
1527 return iter->head_page == cpu_buffer->commit_page && 1635 return iter->head_page == cpu_buffer->commit_page &&
1528 iter->head == rb_commit_index(cpu_buffer); 1636 iter->head == rb_commit_index(cpu_buffer);
1529} 1637}
1638EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
1530 1639
1531static void 1640static void
1532rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, 1641rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
@@ -1597,7 +1706,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1597 unsigned long flags; 1706 unsigned long flags;
1598 int nr_loops = 0; 1707 int nr_loops = 0;
1599 1708
1600 spin_lock_irqsave(&cpu_buffer->lock, flags); 1709 local_irq_save(flags);
1710 __raw_spin_lock(&cpu_buffer->lock);
1601 1711
1602 again: 1712 again:
1603 /* 1713 /*
@@ -1606,8 +1716,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1606 * a case where we will loop three times. There should be no 1716 * a case where we will loop three times. There should be no
1607 * reason to loop four times (that I know of). 1717 * reason to loop four times (that I know of).
1608 */ 1718 */
1609 if (unlikely(++nr_loops > 3)) { 1719 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
1610 RB_WARN_ON(cpu_buffer, 1);
1611 reader = NULL; 1720 reader = NULL;
1612 goto out; 1721 goto out;
1613 } 1722 }
@@ -1619,8 +1728,9 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1619 goto out; 1728 goto out;
1620 1729
1621 /* Never should we have an index greater than the size */ 1730 /* Never should we have an index greater than the size */
1622 RB_WARN_ON(cpu_buffer, 1731 if (RB_WARN_ON(cpu_buffer,
1623 cpu_buffer->reader_page->read > rb_page_size(reader)); 1732 cpu_buffer->reader_page->read > rb_page_size(reader)))
1733 goto out;
1624 1734
1625 /* check if we caught up to the tail */ 1735 /* check if we caught up to the tail */
1626 reader = NULL; 1736 reader = NULL;
@@ -1637,7 +1747,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1637 cpu_buffer->reader_page->list.prev = reader->list.prev; 1747 cpu_buffer->reader_page->list.prev = reader->list.prev;
1638 1748
1639 local_set(&cpu_buffer->reader_page->write, 0); 1749 local_set(&cpu_buffer->reader_page->write, 0);
1640 local_set(&cpu_buffer->reader_page->commit, 0); 1750 local_set(&cpu_buffer->reader_page->page->commit, 0);
1641 1751
1642 /* Make the reader page now replace the head */ 1752 /* Make the reader page now replace the head */
1643 reader->list.prev->next = &cpu_buffer->reader_page->list; 1753 reader->list.prev->next = &cpu_buffer->reader_page->list;
@@ -1659,7 +1769,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1659 goto again; 1769 goto again;
1660 1770
1661 out: 1771 out:
1662 spin_unlock_irqrestore(&cpu_buffer->lock, flags); 1772 __raw_spin_unlock(&cpu_buffer->lock);
1773 local_irq_restore(flags);
1663 1774
1664 return reader; 1775 return reader;
1665} 1776}
@@ -1673,7 +1784,8 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
1673 reader = rb_get_reader_page(cpu_buffer); 1784 reader = rb_get_reader_page(cpu_buffer);
1674 1785
1675 /* This function should not be called when buffer is empty */ 1786 /* This function should not be called when buffer is empty */
1676 BUG_ON(!reader); 1787 if (RB_WARN_ON(cpu_buffer, !reader))
1788 return;
1677 1789
1678 event = rb_reader_event(cpu_buffer); 1790 event = rb_reader_event(cpu_buffer);
1679 1791
@@ -1700,7 +1812,9 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
1700 * Check if we are at the end of the buffer. 1812 * Check if we are at the end of the buffer.
1701 */ 1813 */
1702 if (iter->head >= rb_page_size(iter->head_page)) { 1814 if (iter->head >= rb_page_size(iter->head_page)) {
1703 BUG_ON(iter->head_page == cpu_buffer->commit_page); 1815 if (RB_WARN_ON(buffer,
1816 iter->head_page == cpu_buffer->commit_page))
1817 return;
1704 rb_inc_iter(iter); 1818 rb_inc_iter(iter);
1705 return; 1819 return;
1706 } 1820 }
@@ -1713,8 +1827,10 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
1713 * This should not be called to advance the header if we are 1827 * This should not be called to advance the header if we are
1714 * at the tail of the buffer. 1828 * at the tail of the buffer.
1715 */ 1829 */
1716 BUG_ON((iter->head_page == cpu_buffer->commit_page) && 1830 if (RB_WARN_ON(cpu_buffer,
1717 (iter->head + length > rb_commit_index(cpu_buffer))); 1831 (iter->head_page == cpu_buffer->commit_page) &&
1832 (iter->head + length > rb_commit_index(cpu_buffer))))
1833 return;
1718 1834
1719 rb_update_iter_read_stamp(iter, event); 1835 rb_update_iter_read_stamp(iter, event);
1720 1836
@@ -1726,17 +1842,8 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
1726 rb_advance_iter(iter); 1842 rb_advance_iter(iter);
1727} 1843}
1728 1844
1729/** 1845static struct ring_buffer_event *
1730 * ring_buffer_peek - peek at the next event to be read 1846rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1731 * @buffer: The ring buffer to read
1732 * @cpu: The cpu to peak at
1733 * @ts: The timestamp counter of this event.
1734 *
1735 * This will return the event that will be read next, but does
1736 * not consume the data.
1737 */
1738struct ring_buffer_event *
1739ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1740{ 1847{
1741 struct ring_buffer_per_cpu *cpu_buffer; 1848 struct ring_buffer_per_cpu *cpu_buffer;
1742 struct ring_buffer_event *event; 1849 struct ring_buffer_event *event;
@@ -1757,10 +1864,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1757 * can have. Nesting 10 deep of interrupts is clearly 1864 * can have. Nesting 10 deep of interrupts is clearly
1758 * an anomaly. 1865 * an anomaly.
1759 */ 1866 */
1760 if (unlikely(++nr_loops > 10)) { 1867 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
1761 RB_WARN_ON(cpu_buffer, 1);
1762 return NULL; 1868 return NULL;
1763 }
1764 1869
1765 reader = rb_get_reader_page(cpu_buffer); 1870 reader = rb_get_reader_page(cpu_buffer);
1766 if (!reader) 1871 if (!reader)
@@ -1797,17 +1902,10 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1797 1902
1798 return NULL; 1903 return NULL;
1799} 1904}
1905EXPORT_SYMBOL_GPL(ring_buffer_peek);
1800 1906
1801/** 1907static struct ring_buffer_event *
1802 * ring_buffer_iter_peek - peek at the next event to be read 1908rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1803 * @iter: The ring buffer iterator
1804 * @ts: The timestamp counter of this event.
1805 *
1806 * This will return the event that will be read next, but does
1807 * not increment the iterator.
1808 */
1809struct ring_buffer_event *
1810ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1811{ 1909{
1812 struct ring_buffer *buffer; 1910 struct ring_buffer *buffer;
1813 struct ring_buffer_per_cpu *cpu_buffer; 1911 struct ring_buffer_per_cpu *cpu_buffer;
@@ -1829,10 +1927,8 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1829 * can have. Nesting 10 deep of interrupts is clearly 1927 * can have. Nesting 10 deep of interrupts is clearly
1830 * an anomaly. 1928 * an anomaly.
1831 */ 1929 */
1832 if (unlikely(++nr_loops > 10)) { 1930 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
1833 RB_WARN_ON(cpu_buffer, 1);
1834 return NULL; 1931 return NULL;
1835 }
1836 1932
1837 if (rb_per_cpu_empty(cpu_buffer)) 1933 if (rb_per_cpu_empty(cpu_buffer))
1838 return NULL; 1934 return NULL;
@@ -1867,6 +1963,52 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1867 1963
1868 return NULL; 1964 return NULL;
1869} 1965}
1966EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
1967
1968/**
1969 * ring_buffer_peek - peek at the next event to be read
1970 * @buffer: The ring buffer to read
1971 * @cpu: The cpu to peak at
1972 * @ts: The timestamp counter of this event.
1973 *
1974 * This will return the event that will be read next, but does
1975 * not consume the data.
1976 */
1977struct ring_buffer_event *
1978ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1979{
1980 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
1981 struct ring_buffer_event *event;
1982 unsigned long flags;
1983
1984 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1985 event = rb_buffer_peek(buffer, cpu, ts);
1986 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1987
1988 return event;
1989}
1990
1991/**
1992 * ring_buffer_iter_peek - peek at the next event to be read
1993 * @iter: The ring buffer iterator
1994 * @ts: The timestamp counter of this event.
1995 *
1996 * This will return the event that will be read next, but does
1997 * not increment the iterator.
1998 */
1999struct ring_buffer_event *
2000ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2001{
2002 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2003 struct ring_buffer_event *event;
2004 unsigned long flags;
2005
2006 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2007 event = rb_iter_peek(iter, ts);
2008 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2009
2010 return event;
2011}
1870 2012
1871/** 2013/**
1872 * ring_buffer_consume - return an event and consume it 2014 * ring_buffer_consume - return an event and consume it
@@ -1879,21 +2021,27 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1879struct ring_buffer_event * 2021struct ring_buffer_event *
1880ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts) 2022ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
1881{ 2023{
1882 struct ring_buffer_per_cpu *cpu_buffer; 2024 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
1883 struct ring_buffer_event *event; 2025 struct ring_buffer_event *event;
2026 unsigned long flags;
1884 2027
1885 if (!cpu_isset(cpu, buffer->cpumask)) 2028 if (!cpu_isset(cpu, buffer->cpumask))
1886 return NULL; 2029 return NULL;
1887 2030
1888 event = ring_buffer_peek(buffer, cpu, ts); 2031 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2032
2033 event = rb_buffer_peek(buffer, cpu, ts);
1889 if (!event) 2034 if (!event)
1890 return NULL; 2035 goto out;
1891 2036
1892 cpu_buffer = buffer->buffers[cpu];
1893 rb_advance_reader(cpu_buffer); 2037 rb_advance_reader(cpu_buffer);
1894 2038
2039 out:
2040 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2041
1895 return event; 2042 return event;
1896} 2043}
2044EXPORT_SYMBOL_GPL(ring_buffer_consume);
1897 2045
1898/** 2046/**
1899 * ring_buffer_read_start - start a non consuming read of the buffer 2047 * ring_buffer_read_start - start a non consuming read of the buffer
@@ -1928,12 +2076,15 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
1928 atomic_inc(&cpu_buffer->record_disabled); 2076 atomic_inc(&cpu_buffer->record_disabled);
1929 synchronize_sched(); 2077 synchronize_sched();
1930 2078
1931 spin_lock_irqsave(&cpu_buffer->lock, flags); 2079 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1932 ring_buffer_iter_reset(iter); 2080 __raw_spin_lock(&cpu_buffer->lock);
1933 spin_unlock_irqrestore(&cpu_buffer->lock, flags); 2081 rb_iter_reset(iter);
2082 __raw_spin_unlock(&cpu_buffer->lock);
2083 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1934 2084
1935 return iter; 2085 return iter;
1936} 2086}
2087EXPORT_SYMBOL_GPL(ring_buffer_read_start);
1937 2088
1938/** 2089/**
1939 * ring_buffer_finish - finish reading the iterator of the buffer 2090 * ring_buffer_finish - finish reading the iterator of the buffer
@@ -1950,6 +2101,7 @@ ring_buffer_read_finish(struct ring_buffer_iter *iter)
1950 atomic_dec(&cpu_buffer->record_disabled); 2101 atomic_dec(&cpu_buffer->record_disabled);
1951 kfree(iter); 2102 kfree(iter);
1952} 2103}
2104EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
1953 2105
1954/** 2106/**
1955 * ring_buffer_read - read the next item in the ring buffer by the iterator 2107 * ring_buffer_read - read the next item in the ring buffer by the iterator
@@ -1962,15 +2114,21 @@ struct ring_buffer_event *
1962ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts) 2114ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
1963{ 2115{
1964 struct ring_buffer_event *event; 2116 struct ring_buffer_event *event;
2117 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2118 unsigned long flags;
1965 2119
1966 event = ring_buffer_iter_peek(iter, ts); 2120 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2121 event = rb_iter_peek(iter, ts);
1967 if (!event) 2122 if (!event)
1968 return NULL; 2123 goto out;
1969 2124
1970 rb_advance_iter(iter); 2125 rb_advance_iter(iter);
2126 out:
2127 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1971 2128
1972 return event; 2129 return event;
1973} 2130}
2131EXPORT_SYMBOL_GPL(ring_buffer_read);
1974 2132
1975/** 2133/**
1976 * ring_buffer_size - return the size of the ring buffer (in bytes) 2134 * ring_buffer_size - return the size of the ring buffer (in bytes)
@@ -1980,6 +2138,7 @@ unsigned long ring_buffer_size(struct ring_buffer *buffer)
1980{ 2138{
1981 return BUF_PAGE_SIZE * buffer->pages; 2139 return BUF_PAGE_SIZE * buffer->pages;
1982} 2140}
2141EXPORT_SYMBOL_GPL(ring_buffer_size);
1983 2142
1984static void 2143static void
1985rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) 2144rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
@@ -1987,7 +2146,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
1987 cpu_buffer->head_page 2146 cpu_buffer->head_page
1988 = list_entry(cpu_buffer->pages.next, struct buffer_page, list); 2147 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
1989 local_set(&cpu_buffer->head_page->write, 0); 2148 local_set(&cpu_buffer->head_page->write, 0);
1990 local_set(&cpu_buffer->head_page->commit, 0); 2149 local_set(&cpu_buffer->head_page->page->commit, 0);
1991 2150
1992 cpu_buffer->head_page->read = 0; 2151 cpu_buffer->head_page->read = 0;
1993 2152
@@ -1996,7 +2155,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
1996 2155
1997 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 2156 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1998 local_set(&cpu_buffer->reader_page->write, 0); 2157 local_set(&cpu_buffer->reader_page->write, 0);
1999 local_set(&cpu_buffer->reader_page->commit, 0); 2158 local_set(&cpu_buffer->reader_page->page->commit, 0);
2000 cpu_buffer->reader_page->read = 0; 2159 cpu_buffer->reader_page->read = 0;
2001 2160
2002 cpu_buffer->overrun = 0; 2161 cpu_buffer->overrun = 0;
@@ -2016,12 +2175,17 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2016 if (!cpu_isset(cpu, buffer->cpumask)) 2175 if (!cpu_isset(cpu, buffer->cpumask))
2017 return; 2176 return;
2018 2177
2019 spin_lock_irqsave(&cpu_buffer->lock, flags); 2178 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2179
2180 __raw_spin_lock(&cpu_buffer->lock);
2020 2181
2021 rb_reset_cpu(cpu_buffer); 2182 rb_reset_cpu(cpu_buffer);
2022 2183
2023 spin_unlock_irqrestore(&cpu_buffer->lock, flags); 2184 __raw_spin_unlock(&cpu_buffer->lock);
2185
2186 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2024} 2187}
2188EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
2025 2189
2026/** 2190/**
2027 * ring_buffer_reset - reset a ring buffer 2191 * ring_buffer_reset - reset a ring buffer
@@ -2034,6 +2198,7 @@ void ring_buffer_reset(struct ring_buffer *buffer)
2034 for_each_buffer_cpu(buffer, cpu) 2198 for_each_buffer_cpu(buffer, cpu)
2035 ring_buffer_reset_cpu(buffer, cpu); 2199 ring_buffer_reset_cpu(buffer, cpu);
2036} 2200}
2201EXPORT_SYMBOL_GPL(ring_buffer_reset);
2037 2202
2038/** 2203/**
2039 * rind_buffer_empty - is the ring buffer empty? 2204 * rind_buffer_empty - is the ring buffer empty?
@@ -2052,6 +2217,7 @@ int ring_buffer_empty(struct ring_buffer *buffer)
2052 } 2217 }
2053 return 1; 2218 return 1;
2054} 2219}
2220EXPORT_SYMBOL_GPL(ring_buffer_empty);
2055 2221
2056/** 2222/**
2057 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty? 2223 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
@@ -2068,6 +2234,7 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2068 cpu_buffer = buffer->buffers[cpu]; 2234 cpu_buffer = buffer->buffers[cpu];
2069 return rb_per_cpu_empty(cpu_buffer); 2235 return rb_per_cpu_empty(cpu_buffer);
2070} 2236}
2237EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
2071 2238
2072/** 2239/**
2073 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers 2240 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
@@ -2090,8 +2257,7 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2090 return -EINVAL; 2257 return -EINVAL;
2091 2258
2092 /* At least make sure the two buffers are somewhat the same */ 2259 /* At least make sure the two buffers are somewhat the same */
2093 if (buffer_a->size != buffer_b->size || 2260 if (buffer_a->pages != buffer_b->pages)
2094 buffer_a->pages != buffer_b->pages)
2095 return -EINVAL; 2261 return -EINVAL;
2096 2262
2097 cpu_buffer_a = buffer_a->buffers[cpu]; 2263 cpu_buffer_a = buffer_a->buffers[cpu];
@@ -2117,17 +2283,180 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2117 2283
2118 return 0; 2284 return 0;
2119} 2285}
2286EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
2287
2288static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
2289 struct buffer_data_page *bpage)
2290{
2291 struct ring_buffer_event *event;
2292 unsigned long head;
2293
2294 __raw_spin_lock(&cpu_buffer->lock);
2295 for (head = 0; head < local_read(&bpage->commit);
2296 head += rb_event_length(event)) {
2297
2298 event = __rb_data_page_index(bpage, head);
2299 if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
2300 return;
2301 /* Only count data entries */
2302 if (event->type != RINGBUF_TYPE_DATA)
2303 continue;
2304 cpu_buffer->entries--;
2305 }
2306 __raw_spin_unlock(&cpu_buffer->lock);
2307}
2308
2309/**
2310 * ring_buffer_alloc_read_page - allocate a page to read from buffer
2311 * @buffer: the buffer to allocate for.
2312 *
2313 * This function is used in conjunction with ring_buffer_read_page.
2314 * When reading a full page from the ring buffer, these functions
2315 * can be used to speed up the process. The calling function should
2316 * allocate a few pages first with this function. Then when it
2317 * needs to get pages from the ring buffer, it passes the result
2318 * of this function into ring_buffer_read_page, which will swap
2319 * the page that was allocated, with the read page of the buffer.
2320 *
2321 * Returns:
2322 * The page allocated, or NULL on error.
2323 */
2324void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
2325{
2326 unsigned long addr;
2327 struct buffer_data_page *bpage;
2328
2329 addr = __get_free_page(GFP_KERNEL);
2330 if (!addr)
2331 return NULL;
2332
2333 bpage = (void *)addr;
2334
2335 return bpage;
2336}
2337
2338/**
2339 * ring_buffer_free_read_page - free an allocated read page
2340 * @buffer: the buffer the page was allocate for
2341 * @data: the page to free
2342 *
2343 * Free a page allocated from ring_buffer_alloc_read_page.
2344 */
2345void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
2346{
2347 free_page((unsigned long)data);
2348}
2349
2350/**
2351 * ring_buffer_read_page - extract a page from the ring buffer
2352 * @buffer: buffer to extract from
2353 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
2354 * @cpu: the cpu of the buffer to extract
2355 * @full: should the extraction only happen when the page is full.
2356 *
2357 * This function will pull out a page from the ring buffer and consume it.
2358 * @data_page must be the address of the variable that was returned
2359 * from ring_buffer_alloc_read_page. This is because the page might be used
2360 * to swap with a page in the ring buffer.
2361 *
2362 * for example:
2363 * rpage = ring_buffer_alloc_page(buffer);
2364 * if (!rpage)
2365 * return error;
2366 * ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
2367 * if (ret)
2368 * process_page(rpage);
2369 *
2370 * When @full is set, the function will not return true unless
2371 * the writer is off the reader page.
2372 *
2373 * Note: it is up to the calling functions to handle sleeps and wakeups.
2374 * The ring buffer can be used anywhere in the kernel and can not
2375 * blindly call wake_up. The layer that uses the ring buffer must be
2376 * responsible for that.
2377 *
2378 * Returns:
2379 * 1 if data has been transferred
2380 * 0 if no data has been transferred.
2381 */
2382int ring_buffer_read_page(struct ring_buffer *buffer,
2383 void **data_page, int cpu, int full)
2384{
2385 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2386 struct ring_buffer_event *event;
2387 struct buffer_data_page *bpage;
2388 unsigned long flags;
2389 int ret = 0;
2390
2391 if (!data_page)
2392 return 0;
2393
2394 bpage = *data_page;
2395 if (!bpage)
2396 return 0;
2397
2398 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2399
2400 /*
2401 * rb_buffer_peek will get the next ring buffer if
2402 * the current reader page is empty.
2403 */
2404 event = rb_buffer_peek(buffer, cpu, NULL);
2405 if (!event)
2406 goto out;
2407
2408 /* check for data */
2409 if (!local_read(&cpu_buffer->reader_page->page->commit))
2410 goto out;
2411 /*
2412 * If the writer is already off of the read page, then simply
2413 * switch the read page with the given page. Otherwise
2414 * we need to copy the data from the reader to the writer.
2415 */
2416 if (cpu_buffer->reader_page == cpu_buffer->commit_page) {
2417 unsigned int read = cpu_buffer->reader_page->read;
2418
2419 if (full)
2420 goto out;
2421 /* The writer is still on the reader page, we must copy */
2422 bpage = cpu_buffer->reader_page->page;
2423 memcpy(bpage->data,
2424 cpu_buffer->reader_page->page->data + read,
2425 local_read(&bpage->commit) - read);
2426
2427 /* consume what was read */
2428 cpu_buffer->reader_page += read;
2429
2430 } else {
2431 /* swap the pages */
2432 rb_init_page(bpage);
2433 bpage = cpu_buffer->reader_page->page;
2434 cpu_buffer->reader_page->page = *data_page;
2435 cpu_buffer->reader_page->read = 0;
2436 *data_page = bpage;
2437 }
2438 ret = 1;
2439
2440 /* update the entry counter */
2441 rb_remove_entries(cpu_buffer, bpage);
2442 out:
2443 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2444
2445 return ret;
2446}
2120 2447
2121static ssize_t 2448static ssize_t
2122rb_simple_read(struct file *filp, char __user *ubuf, 2449rb_simple_read(struct file *filp, char __user *ubuf,
2123 size_t cnt, loff_t *ppos) 2450 size_t cnt, loff_t *ppos)
2124{ 2451{
2125 int *p = filp->private_data; 2452 long *p = filp->private_data;
2126 char buf[64]; 2453 char buf[64];
2127 int r; 2454 int r;
2128 2455
2129 /* !ring_buffers_off == tracing_on */ 2456 if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
2130 r = sprintf(buf, "%d\n", !*p); 2457 r = sprintf(buf, "permanently disabled\n");
2458 else
2459 r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
2131 2460
2132 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 2461 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2133} 2462}
@@ -2136,7 +2465,7 @@ static ssize_t
2136rb_simple_write(struct file *filp, const char __user *ubuf, 2465rb_simple_write(struct file *filp, const char __user *ubuf,
2137 size_t cnt, loff_t *ppos) 2466 size_t cnt, loff_t *ppos)
2138{ 2467{
2139 int *p = filp->private_data; 2468 long *p = filp->private_data;
2140 char buf[64]; 2469 char buf[64];
2141 long val; 2470 long val;
2142 int ret; 2471 int ret;
@@ -2153,8 +2482,10 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
2153 if (ret < 0) 2482 if (ret < 0)
2154 return ret; 2483 return ret;
2155 2484
2156 /* !ring_buffers_off == tracing_on */ 2485 if (val)
2157 *p = !val; 2486 set_bit(RB_BUFFERS_ON_BIT, p);
2487 else
2488 clear_bit(RB_BUFFERS_ON_BIT, p);
2158 2489
2159 (*ppos)++; 2490 (*ppos)++;
2160 2491
@@ -2176,7 +2507,7 @@ static __init int rb_init_debugfs(void)
2176 d_tracer = tracing_init_dentry(); 2507 d_tracer = tracing_init_dentry();
2177 2508
2178 entry = debugfs_create_file("tracing_on", 0644, d_tracer, 2509 entry = debugfs_create_file("tracing_on", 0644, d_tracer,
2179 &ring_buffers_off, &rb_simple_fops); 2510 &ring_buffer_flags, &rb_simple_fops);
2180 if (!entry) 2511 if (!entry)
2181 pr_warning("Could not create debugfs 'tracing_on' entry\n"); 2512 pr_warning("Could not create debugfs 'tracing_on' entry\n");
2182 2513
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d86e3252f300..4185d5221633 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -43,6 +43,38 @@
43unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX; 43unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
44unsigned long __read_mostly tracing_thresh; 44unsigned long __read_mostly tracing_thresh;
45 45
46/*
47 * We need to change this state when a selftest is running.
48 * A selftest will lurk into the ring-buffer to count the
49 * entries inserted during the selftest although some concurrent
50 * insertions into the ring-buffer such as ftrace_printk could occurred
51 * at the same time, giving false positive or negative results.
52 */
53static bool __read_mostly tracing_selftest_running;
54
55/* For tracers that don't implement custom flags */
56static struct tracer_opt dummy_tracer_opt[] = {
57 { }
58};
59
60static struct tracer_flags dummy_tracer_flags = {
61 .val = 0,
62 .opts = dummy_tracer_opt
63};
64
65static int dummy_set_flag(u32 old_flags, u32 bit, int set)
66{
67 return 0;
68}
69
70/*
71 * Kill all tracing for good (never come back).
72 * It is initialized to 1 but will turn to zero if the initialization
73 * of the tracer is successful. But that is the only place that sets
74 * this back to zero.
75 */
76int tracing_disabled = 1;
77
46static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled); 78static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
47 79
48static inline void ftrace_disable_cpu(void) 80static inline void ftrace_disable_cpu(void)
@@ -62,7 +94,36 @@ static cpumask_t __read_mostly tracing_buffer_mask;
62#define for_each_tracing_cpu(cpu) \ 94#define for_each_tracing_cpu(cpu) \
63 for_each_cpu_mask(cpu, tracing_buffer_mask) 95 for_each_cpu_mask(cpu, tracing_buffer_mask)
64 96
65static int tracing_disabled = 1; 97/*
98 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
99 *
100 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
101 * is set, then ftrace_dump is called. This will output the contents
102 * of the ftrace buffers to the console. This is very useful for
103 * capturing traces that lead to crashes and outputing it to a
104 * serial console.
105 *
106 * It is default off, but you can enable it with either specifying
107 * "ftrace_dump_on_oops" in the kernel command line, or setting
108 * /proc/sys/kernel/ftrace_dump_on_oops to true.
109 */
110int ftrace_dump_on_oops;
111
112static int tracing_set_tracer(char *buf);
113
114static int __init set_ftrace(char *str)
115{
116 tracing_set_tracer(str);
117 return 1;
118}
119__setup("ftrace", set_ftrace);
120
121static int __init set_ftrace_dump_on_oops(char *str)
122{
123 ftrace_dump_on_oops = 1;
124 return 1;
125}
126__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
66 127
67long 128long
68ns2usecs(cycle_t nsec) 129ns2usecs(cycle_t nsec)
@@ -112,6 +173,19 @@ static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
112/* tracer_enabled is used to toggle activation of a tracer */ 173/* tracer_enabled is used to toggle activation of a tracer */
113static int tracer_enabled = 1; 174static int tracer_enabled = 1;
114 175
176/**
177 * tracing_is_enabled - return tracer_enabled status
178 *
179 * This function is used by other tracers to know the status
180 * of the tracer_enabled flag. Tracers may use this function
181 * to know if it should enable their features when starting
182 * up. See irqsoff tracer for an example (start_irqsoff_tracer).
183 */
184int tracing_is_enabled(void)
185{
186 return tracer_enabled;
187}
188
115/* function tracing enabled */ 189/* function tracing enabled */
116int ftrace_function_enabled; 190int ftrace_function_enabled;
117 191
@@ -153,8 +227,9 @@ static DEFINE_MUTEX(trace_types_lock);
153/* trace_wait is a waitqueue for tasks blocked on trace_poll */ 227/* trace_wait is a waitqueue for tasks blocked on trace_poll */
154static DECLARE_WAIT_QUEUE_HEAD(trace_wait); 228static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
155 229
156/* trace_flags holds iter_ctrl options */ 230/* trace_flags holds trace_options default values */
157unsigned long trace_flags = TRACE_ITER_PRINT_PARENT; 231unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
232 TRACE_ITER_ANNOTATE;
158 233
159/** 234/**
160 * trace_wake_up - wake up tasks waiting for trace input 235 * trace_wake_up - wake up tasks waiting for trace input
@@ -193,13 +268,6 @@ unsigned long nsecs_to_usecs(unsigned long nsecs)
193 return nsecs / 1000; 268 return nsecs / 1000;
194} 269}
195 270
196/*
197 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
198 * control the output of kernel symbols.
199 */
200#define TRACE_ITER_SYM_MASK \
201 (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
202
203/* These must match the bit postions in trace_iterator_flags */ 271/* These must match the bit postions in trace_iterator_flags */
204static const char *trace_options[] = { 272static const char *trace_options[] = {
205 "print-parent", 273 "print-parent",
@@ -213,6 +281,12 @@ static const char *trace_options[] = {
213 "stacktrace", 281 "stacktrace",
214 "sched-tree", 282 "sched-tree",
215 "ftrace_printk", 283 "ftrace_printk",
284 "ftrace_preempt",
285 "branch",
286 "annotate",
287 "userstacktrace",
288 "sym-userobj",
289 "printk-msg-only",
216 NULL 290 NULL
217}; 291};
218 292
@@ -246,7 +320,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
246 320
247 memcpy(data->comm, tsk->comm, TASK_COMM_LEN); 321 memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
248 data->pid = tsk->pid; 322 data->pid = tsk->pid;
249 data->uid = tsk->uid; 323 data->uid = task_uid(tsk);
250 data->nice = tsk->static_prio - 20 - MAX_RT_PRIO; 324 data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
251 data->policy = tsk->policy; 325 data->policy = tsk->policy;
252 data->rt_priority = tsk->rt_priority; 326 data->rt_priority = tsk->rt_priority;
@@ -359,6 +433,28 @@ trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
359 return trace_seq_putmem(s, hex, j); 433 return trace_seq_putmem(s, hex, j);
360} 434}
361 435
436static int
437trace_seq_path(struct trace_seq *s, struct path *path)
438{
439 unsigned char *p;
440
441 if (s->len >= (PAGE_SIZE - 1))
442 return 0;
443 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
444 if (!IS_ERR(p)) {
445 p = mangle_path(s->buffer + s->len, p, "\n");
446 if (p) {
447 s->len = p - s->buffer;
448 return 1;
449 }
450 } else {
451 s->buffer[s->len++] = '?';
452 return 1;
453 }
454
455 return 0;
456}
457
362static void 458static void
363trace_seq_reset(struct trace_seq *s) 459trace_seq_reset(struct trace_seq *s)
364{ 460{
@@ -470,7 +566,17 @@ int register_tracer(struct tracer *type)
470 return -1; 566 return -1;
471 } 567 }
472 568
569 /*
570 * When this gets called we hold the BKL which means that
571 * preemption is disabled. Various trace selftests however
572 * need to disable and enable preemption for successful tests.
573 * So we drop the BKL here and grab it after the tests again.
574 */
575 unlock_kernel();
473 mutex_lock(&trace_types_lock); 576 mutex_lock(&trace_types_lock);
577
578 tracing_selftest_running = true;
579
474 for (t = trace_types; t; t = t->next) { 580 for (t = trace_types; t; t = t->next) {
475 if (strcmp(type->name, t->name) == 0) { 581 if (strcmp(type->name, t->name) == 0) {
476 /* already found */ 582 /* already found */
@@ -481,12 +587,20 @@ int register_tracer(struct tracer *type)
481 } 587 }
482 } 588 }
483 589
590 if (!type->set_flag)
591 type->set_flag = &dummy_set_flag;
592 if (!type->flags)
593 type->flags = &dummy_tracer_flags;
594 else
595 if (!type->flags->opts)
596 type->flags->opts = dummy_tracer_opt;
597
484#ifdef CONFIG_FTRACE_STARTUP_TEST 598#ifdef CONFIG_FTRACE_STARTUP_TEST
485 if (type->selftest) { 599 if (type->selftest) {
486 struct tracer *saved_tracer = current_trace; 600 struct tracer *saved_tracer = current_trace;
487 struct trace_array *tr = &global_trace; 601 struct trace_array *tr = &global_trace;
488 int saved_ctrl = tr->ctrl;
489 int i; 602 int i;
603
490 /* 604 /*
491 * Run a selftest on this tracer. 605 * Run a selftest on this tracer.
492 * Here we reset the trace buffer, and set the current 606 * Here we reset the trace buffer, and set the current
@@ -494,25 +608,23 @@ int register_tracer(struct tracer *type)
494 * internal tracing to verify that everything is in order. 608 * internal tracing to verify that everything is in order.
495 * If we fail, we do not register this tracer. 609 * If we fail, we do not register this tracer.
496 */ 610 */
497 for_each_tracing_cpu(i) { 611 for_each_tracing_cpu(i)
498 tracing_reset(tr, i); 612 tracing_reset(tr, i);
499 } 613
500 current_trace = type; 614 current_trace = type;
501 tr->ctrl = 0;
502 /* the test is responsible for initializing and enabling */ 615 /* the test is responsible for initializing and enabling */
503 pr_info("Testing tracer %s: ", type->name); 616 pr_info("Testing tracer %s: ", type->name);
504 ret = type->selftest(type, tr); 617 ret = type->selftest(type, tr);
505 /* the test is responsible for resetting too */ 618 /* the test is responsible for resetting too */
506 current_trace = saved_tracer; 619 current_trace = saved_tracer;
507 tr->ctrl = saved_ctrl;
508 if (ret) { 620 if (ret) {
509 printk(KERN_CONT "FAILED!\n"); 621 printk(KERN_CONT "FAILED!\n");
510 goto out; 622 goto out;
511 } 623 }
512 /* Only reset on passing, to avoid touching corrupted buffers */ 624 /* Only reset on passing, to avoid touching corrupted buffers */
513 for_each_tracing_cpu(i) { 625 for_each_tracing_cpu(i)
514 tracing_reset(tr, i); 626 tracing_reset(tr, i);
515 } 627
516 printk(KERN_CONT "PASSED\n"); 628 printk(KERN_CONT "PASSED\n");
517 } 629 }
518#endif 630#endif
@@ -524,7 +636,9 @@ int register_tracer(struct tracer *type)
524 max_tracer_type_len = len; 636 max_tracer_type_len = len;
525 637
526 out: 638 out:
639 tracing_selftest_running = false;
527 mutex_unlock(&trace_types_lock); 640 mutex_unlock(&trace_types_lock);
641 lock_kernel();
528 642
529 return ret; 643 return ret;
530} 644}
@@ -564,6 +678,16 @@ void tracing_reset(struct trace_array *tr, int cpu)
564 ftrace_enable_cpu(); 678 ftrace_enable_cpu();
565} 679}
566 680
681void tracing_reset_online_cpus(struct trace_array *tr)
682{
683 int cpu;
684
685 tr->time_start = ftrace_now(tr->cpu);
686
687 for_each_online_cpu(cpu)
688 tracing_reset(tr, cpu);
689}
690
567#define SAVED_CMDLINES 128 691#define SAVED_CMDLINES 128
568static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; 692static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
569static unsigned map_cmdline_to_pid[SAVED_CMDLINES]; 693static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
@@ -581,6 +705,91 @@ static void trace_init_cmdlines(void)
581 cmdline_idx = 0; 705 cmdline_idx = 0;
582} 706}
583 707
708static int trace_stop_count;
709static DEFINE_SPINLOCK(tracing_start_lock);
710
711/**
712 * ftrace_off_permanent - disable all ftrace code permanently
713 *
714 * This should only be called when a serious anomally has
715 * been detected. This will turn off the function tracing,
716 * ring buffers, and other tracing utilites. It takes no
717 * locks and can be called from any context.
718 */
719void ftrace_off_permanent(void)
720{
721 tracing_disabled = 1;
722 ftrace_stop();
723 tracing_off_permanent();
724}
725
726/**
727 * tracing_start - quick start of the tracer
728 *
729 * If tracing is enabled but was stopped by tracing_stop,
730 * this will start the tracer back up.
731 */
732void tracing_start(void)
733{
734 struct ring_buffer *buffer;
735 unsigned long flags;
736
737 if (tracing_disabled)
738 return;
739
740 spin_lock_irqsave(&tracing_start_lock, flags);
741 if (--trace_stop_count)
742 goto out;
743
744 if (trace_stop_count < 0) {
745 /* Someone screwed up their debugging */
746 WARN_ON_ONCE(1);
747 trace_stop_count = 0;
748 goto out;
749 }
750
751
752 buffer = global_trace.buffer;
753 if (buffer)
754 ring_buffer_record_enable(buffer);
755
756 buffer = max_tr.buffer;
757 if (buffer)
758 ring_buffer_record_enable(buffer);
759
760 ftrace_start();
761 out:
762 spin_unlock_irqrestore(&tracing_start_lock, flags);
763}
764
765/**
766 * tracing_stop - quick stop of the tracer
767 *
768 * Light weight way to stop tracing. Use in conjunction with
769 * tracing_start.
770 */
771void tracing_stop(void)
772{
773 struct ring_buffer *buffer;
774 unsigned long flags;
775
776 ftrace_stop();
777 spin_lock_irqsave(&tracing_start_lock, flags);
778 if (trace_stop_count++)
779 goto out;
780
781 buffer = global_trace.buffer;
782 if (buffer)
783 ring_buffer_record_disable(buffer);
784
785 buffer = max_tr.buffer;
786 if (buffer)
787 ring_buffer_record_disable(buffer);
788
789 out:
790 spin_unlock_irqrestore(&tracing_start_lock, flags);
791}
792
584void trace_stop_cmdline_recording(void); 793void trace_stop_cmdline_recording(void);
585 794
586static void trace_save_cmdline(struct task_struct *tsk) 795static void trace_save_cmdline(struct task_struct *tsk)
@@ -618,7 +827,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
618 spin_unlock(&trace_cmdline_lock); 827 spin_unlock(&trace_cmdline_lock);
619} 828}
620 829
621static char *trace_find_cmdline(int pid) 830char *trace_find_cmdline(int pid)
622{ 831{
623 char *cmdline = "<...>"; 832 char *cmdline = "<...>";
624 unsigned map; 833 unsigned map;
@@ -655,6 +864,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
655 864
656 entry->preempt_count = pc & 0xff; 865 entry->preempt_count = pc & 0xff;
657 entry->pid = (tsk) ? tsk->pid : 0; 866 entry->pid = (tsk) ? tsk->pid : 0;
867 entry->tgid = (tsk) ? tsk->tgid : 0;
658 entry->flags = 868 entry->flags =
659#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT 869#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
660 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | 870 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
@@ -691,6 +901,56 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
691 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 901 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
692} 902}
693 903
904#ifdef CONFIG_FUNCTION_GRAPH_TRACER
905static void __trace_graph_entry(struct trace_array *tr,
906 struct trace_array_cpu *data,
907 struct ftrace_graph_ent *trace,
908 unsigned long flags,
909 int pc)
910{
911 struct ring_buffer_event *event;
912 struct ftrace_graph_ent_entry *entry;
913 unsigned long irq_flags;
914
915 if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
916 return;
917
918 event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
919 &irq_flags);
920 if (!event)
921 return;
922 entry = ring_buffer_event_data(event);
923 tracing_generic_entry_update(&entry->ent, flags, pc);
924 entry->ent.type = TRACE_GRAPH_ENT;
925 entry->graph_ent = *trace;
926 ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
927}
928
929static void __trace_graph_return(struct trace_array *tr,
930 struct trace_array_cpu *data,
931 struct ftrace_graph_ret *trace,
932 unsigned long flags,
933 int pc)
934{
935 struct ring_buffer_event *event;
936 struct ftrace_graph_ret_entry *entry;
937 unsigned long irq_flags;
938
939 if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
940 return;
941
942 event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
943 &irq_flags);
944 if (!event)
945 return;
946 entry = ring_buffer_event_data(event);
947 tracing_generic_entry_update(&entry->ent, flags, pc);
948 entry->ent.type = TRACE_GRAPH_RET;
949 entry->ret = *trace;
950 ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
951}
952#endif
953
694void 954void
695ftrace(struct trace_array *tr, struct trace_array_cpu *data, 955ftrace(struct trace_array *tr, struct trace_array_cpu *data,
696 unsigned long ip, unsigned long parent_ip, unsigned long flags, 956 unsigned long ip, unsigned long parent_ip, unsigned long flags,
@@ -742,6 +1002,46 @@ void __trace_stack(struct trace_array *tr,
742 ftrace_trace_stack(tr, data, flags, skip, preempt_count()); 1002 ftrace_trace_stack(tr, data, flags, skip, preempt_count());
743} 1003}
744 1004
1005static void ftrace_trace_userstack(struct trace_array *tr,
1006 struct trace_array_cpu *data,
1007 unsigned long flags, int pc)
1008{
1009#ifdef CONFIG_STACKTRACE
1010 struct ring_buffer_event *event;
1011 struct userstack_entry *entry;
1012 struct stack_trace trace;
1013 unsigned long irq_flags;
1014
1015 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1016 return;
1017
1018 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
1019 &irq_flags);
1020 if (!event)
1021 return;
1022 entry = ring_buffer_event_data(event);
1023 tracing_generic_entry_update(&entry->ent, flags, pc);
1024 entry->ent.type = TRACE_USER_STACK;
1025
1026 memset(&entry->caller, 0, sizeof(entry->caller));
1027
1028 trace.nr_entries = 0;
1029 trace.max_entries = FTRACE_STACK_ENTRIES;
1030 trace.skip = 0;
1031 trace.entries = entry->caller;
1032
1033 save_stack_trace_user(&trace);
1034 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
1035#endif
1036}
1037
1038void __trace_userstack(struct trace_array *tr,
1039 struct trace_array_cpu *data,
1040 unsigned long flags)
1041{
1042 ftrace_trace_userstack(tr, data, flags, preempt_count());
1043}
1044
745static void 1045static void
746ftrace_trace_special(void *__tr, void *__data, 1046ftrace_trace_special(void *__tr, void *__data,
747 unsigned long arg1, unsigned long arg2, unsigned long arg3, 1047 unsigned long arg1, unsigned long arg2, unsigned long arg3,
@@ -765,6 +1065,7 @@ ftrace_trace_special(void *__tr, void *__data,
765 entry->arg3 = arg3; 1065 entry->arg3 = arg3;
766 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 1066 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
767 ftrace_trace_stack(tr, data, irq_flags, 4, pc); 1067 ftrace_trace_stack(tr, data, irq_flags, 4, pc);
1068 ftrace_trace_userstack(tr, data, irq_flags, pc);
768 1069
769 trace_wake_up(); 1070 trace_wake_up();
770} 1071}
@@ -803,6 +1104,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
803 entry->next_cpu = task_cpu(next); 1104 entry->next_cpu = task_cpu(next);
804 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 1105 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
805 ftrace_trace_stack(tr, data, flags, 5, pc); 1106 ftrace_trace_stack(tr, data, flags, 5, pc);
1107 ftrace_trace_userstack(tr, data, flags, pc);
806} 1108}
807 1109
808void 1110void
@@ -832,6 +1134,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
832 entry->next_cpu = task_cpu(wakee); 1134 entry->next_cpu = task_cpu(wakee);
833 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 1135 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
834 ftrace_trace_stack(tr, data, flags, 6, pc); 1136 ftrace_trace_stack(tr, data, flags, 6, pc);
1137 ftrace_trace_userstack(tr, data, flags, pc);
835 1138
836 trace_wake_up(); 1139 trace_wake_up();
837} 1140}
@@ -841,26 +1144,28 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
841{ 1144{
842 struct trace_array *tr = &global_trace; 1145 struct trace_array *tr = &global_trace;
843 struct trace_array_cpu *data; 1146 struct trace_array_cpu *data;
1147 unsigned long flags;
844 int cpu; 1148 int cpu;
845 int pc; 1149 int pc;
846 1150
847 if (tracing_disabled || !tr->ctrl) 1151 if (tracing_disabled)
848 return; 1152 return;
849 1153
850 pc = preempt_count(); 1154 pc = preempt_count();
851 preempt_disable_notrace(); 1155 local_irq_save(flags);
852 cpu = raw_smp_processor_id(); 1156 cpu = raw_smp_processor_id();
853 data = tr->data[cpu]; 1157 data = tr->data[cpu];
854 1158
855 if (likely(!atomic_read(&data->disabled))) 1159 if (likely(atomic_inc_return(&data->disabled) == 1))
856 ftrace_trace_special(tr, data, arg1, arg2, arg3, pc); 1160 ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);
857 1161
858 preempt_enable_notrace(); 1162 atomic_dec(&data->disabled);
1163 local_irq_restore(flags);
859} 1164}
860 1165
861#ifdef CONFIG_FUNCTION_TRACER 1166#ifdef CONFIG_FUNCTION_TRACER
862static void 1167static void
863function_trace_call(unsigned long ip, unsigned long parent_ip) 1168function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
864{ 1169{
865 struct trace_array *tr = &global_trace; 1170 struct trace_array *tr = &global_trace;
866 struct trace_array_cpu *data; 1171 struct trace_array_cpu *data;
@@ -873,8 +1178,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
873 return; 1178 return;
874 1179
875 pc = preempt_count(); 1180 pc = preempt_count();
876 resched = need_resched(); 1181 resched = ftrace_preempt_disable();
877 preempt_disable_notrace();
878 local_save_flags(flags); 1182 local_save_flags(flags);
879 cpu = raw_smp_processor_id(); 1183 cpu = raw_smp_processor_id();
880 data = tr->data[cpu]; 1184 data = tr->data[cpu];
@@ -884,11 +1188,96 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
884 trace_function(tr, data, ip, parent_ip, flags, pc); 1188 trace_function(tr, data, ip, parent_ip, flags, pc);
885 1189
886 atomic_dec(&data->disabled); 1190 atomic_dec(&data->disabled);
887 if (resched) 1191 ftrace_preempt_enable(resched);
888 preempt_enable_no_resched_notrace(); 1192}
889 else 1193
890 preempt_enable_notrace(); 1194static void
1195function_trace_call(unsigned long ip, unsigned long parent_ip)
1196{
1197 struct trace_array *tr = &global_trace;
1198 struct trace_array_cpu *data;
1199 unsigned long flags;
1200 long disabled;
1201 int cpu;
1202 int pc;
1203
1204 if (unlikely(!ftrace_function_enabled))
1205 return;
1206
1207 /*
1208 * Need to use raw, since this must be called before the
1209 * recursive protection is performed.
1210 */
1211 local_irq_save(flags);
1212 cpu = raw_smp_processor_id();
1213 data = tr->data[cpu];
1214 disabled = atomic_inc_return(&data->disabled);
1215
1216 if (likely(disabled == 1)) {
1217 pc = preempt_count();
1218 trace_function(tr, data, ip, parent_ip, flags, pc);
1219 }
1220
1221 atomic_dec(&data->disabled);
1222 local_irq_restore(flags);
1223}
1224
1225#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1226int trace_graph_entry(struct ftrace_graph_ent *trace)
1227{
1228 struct trace_array *tr = &global_trace;
1229 struct trace_array_cpu *data;
1230 unsigned long flags;
1231 long disabled;
1232 int cpu;
1233 int pc;
1234
1235 if (!ftrace_trace_task(current))
1236 return 0;
1237
1238 if (!ftrace_graph_addr(trace->func))
1239 return 0;
1240
1241 local_irq_save(flags);
1242 cpu = raw_smp_processor_id();
1243 data = tr->data[cpu];
1244 disabled = atomic_inc_return(&data->disabled);
1245 if (likely(disabled == 1)) {
1246 pc = preempt_count();
1247 __trace_graph_entry(tr, data, trace, flags, pc);
1248 }
1249 /* Only do the atomic if it is not already set */
1250 if (!test_tsk_trace_graph(current))
1251 set_tsk_trace_graph(current);
1252 atomic_dec(&data->disabled);
1253 local_irq_restore(flags);
1254
1255 return 1;
1256}
1257
1258void trace_graph_return(struct ftrace_graph_ret *trace)
1259{
1260 struct trace_array *tr = &global_trace;
1261 struct trace_array_cpu *data;
1262 unsigned long flags;
1263 long disabled;
1264 int cpu;
1265 int pc;
1266
1267 local_irq_save(flags);
1268 cpu = raw_smp_processor_id();
1269 data = tr->data[cpu];
1270 disabled = atomic_inc_return(&data->disabled);
1271 if (likely(disabled == 1)) {
1272 pc = preempt_count();
1273 __trace_graph_return(tr, data, trace, flags, pc);
1274 }
1275 if (!trace->depth)
1276 clear_tsk_trace_graph(current);
1277 atomic_dec(&data->disabled);
1278 local_irq_restore(flags);
891} 1279}
1280#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
892 1281
893static struct ftrace_ops trace_ops __read_mostly = 1282static struct ftrace_ops trace_ops __read_mostly =
894{ 1283{
@@ -898,9 +1287,14 @@ static struct ftrace_ops trace_ops __read_mostly =
898void tracing_start_function_trace(void) 1287void tracing_start_function_trace(void)
899{ 1288{
900 ftrace_function_enabled = 0; 1289 ftrace_function_enabled = 0;
1290
1291 if (trace_flags & TRACE_ITER_PREEMPTONLY)
1292 trace_ops.func = function_trace_call_preempt_only;
1293 else
1294 trace_ops.func = function_trace_call;
1295
901 register_ftrace_function(&trace_ops); 1296 register_ftrace_function(&trace_ops);
902 if (tracer_enabled) 1297 ftrace_function_enabled = 1;
903 ftrace_function_enabled = 1;
904} 1298}
905 1299
906void tracing_stop_function_trace(void) 1300void tracing_stop_function_trace(void)
@@ -912,9 +1306,10 @@ void tracing_stop_function_trace(void)
912 1306
913enum trace_file_type { 1307enum trace_file_type {
914 TRACE_FILE_LAT_FMT = 1, 1308 TRACE_FILE_LAT_FMT = 1,
1309 TRACE_FILE_ANNOTATE = 2,
915}; 1310};
916 1311
917static void trace_iterator_increment(struct trace_iterator *iter, int cpu) 1312static void trace_iterator_increment(struct trace_iterator *iter)
918{ 1313{
919 /* Don't allow ftrace to trace into the ring buffers */ 1314 /* Don't allow ftrace to trace into the ring buffers */
920 ftrace_disable_cpu(); 1315 ftrace_disable_cpu();
@@ -993,7 +1388,7 @@ static void *find_next_entry_inc(struct trace_iterator *iter)
993 iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts); 1388 iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
994 1389
995 if (iter->ent) 1390 if (iter->ent)
996 trace_iterator_increment(iter, iter->cpu); 1391 trace_iterator_increment(iter);
997 1392
998 return iter->ent ? iter : NULL; 1393 return iter->ent ? iter : NULL;
999} 1394}
@@ -1047,10 +1442,6 @@ static void *s_start(struct seq_file *m, loff_t *pos)
1047 1442
1048 atomic_inc(&trace_record_cmdline_disabled); 1443 atomic_inc(&trace_record_cmdline_disabled);
1049 1444
1050 /* let the tracer grab locks here if needed */
1051 if (current_trace->start)
1052 current_trace->start(iter);
1053
1054 if (*pos != iter->pos) { 1445 if (*pos != iter->pos) {
1055 iter->ent = NULL; 1446 iter->ent = NULL;
1056 iter->cpu = 0; 1447 iter->cpu = 0;
@@ -1077,14 +1468,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
1077 1468
1078static void s_stop(struct seq_file *m, void *p) 1469static void s_stop(struct seq_file *m, void *p)
1079{ 1470{
1080 struct trace_iterator *iter = m->private;
1081
1082 atomic_dec(&trace_record_cmdline_disabled); 1471 atomic_dec(&trace_record_cmdline_disabled);
1083
1084 /* let the tracer release locks here if needed */
1085 if (current_trace && current_trace == iter->trace && iter->trace->stop)
1086 iter->trace->stop(iter);
1087
1088 mutex_unlock(&trace_types_lock); 1472 mutex_unlock(&trace_types_lock);
1089} 1473}
1090 1474
@@ -1143,7 +1527,7 @@ seq_print_sym_offset(struct trace_seq *s, const char *fmt,
1143# define IP_FMT "%016lx" 1527# define IP_FMT "%016lx"
1144#endif 1528#endif
1145 1529
1146static int 1530int
1147seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) 1531seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
1148{ 1532{
1149 int ret; 1533 int ret;
@@ -1164,6 +1548,78 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
1164 return ret; 1548 return ret;
1165} 1549}
1166 1550
1551static inline int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
1552 unsigned long ip, unsigned long sym_flags)
1553{
1554 struct file *file = NULL;
1555 unsigned long vmstart = 0;
1556 int ret = 1;
1557
1558 if (mm) {
1559 const struct vm_area_struct *vma;
1560
1561 down_read(&mm->mmap_sem);
1562 vma = find_vma(mm, ip);
1563 if (vma) {
1564 file = vma->vm_file;
1565 vmstart = vma->vm_start;
1566 }
1567 if (file) {
1568 ret = trace_seq_path(s, &file->f_path);
1569 if (ret)
1570 ret = trace_seq_printf(s, "[+0x%lx]", ip - vmstart);
1571 }
1572 up_read(&mm->mmap_sem);
1573 }
1574 if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
1575 ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
1576 return ret;
1577}
1578
1579static int
1580seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
1581 unsigned long sym_flags)
1582{
1583 struct mm_struct *mm = NULL;
1584 int ret = 1;
1585 unsigned int i;
1586
1587 if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
1588 struct task_struct *task;
1589 /*
1590 * we do the lookup on the thread group leader,
1591 * since individual threads might have already quit!
1592 */
1593 rcu_read_lock();
1594 task = find_task_by_vpid(entry->ent.tgid);
1595 if (task)
1596 mm = get_task_mm(task);
1597 rcu_read_unlock();
1598 }
1599
1600 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
1601 unsigned long ip = entry->caller[i];
1602
1603 if (ip == ULONG_MAX || !ret)
1604 break;
1605 if (i && ret)
1606 ret = trace_seq_puts(s, " <- ");
1607 if (!ip) {
1608 if (ret)
1609 ret = trace_seq_puts(s, "??");
1610 continue;
1611 }
1612 if (!ret)
1613 break;
1614 if (ret)
1615 ret = seq_print_user_ip(s, mm, ip, sym_flags);
1616 }
1617
1618 if (mm)
1619 mmput(mm);
1620 return ret;
1621}
1622
1167static void print_lat_help_header(struct seq_file *m) 1623static void print_lat_help_header(struct seq_file *m)
1168{ 1624{
1169 seq_puts(m, "# _------=> CPU# \n"); 1625 seq_puts(m, "# _------=> CPU# \n");
@@ -1301,6 +1757,13 @@ lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
1301 1757
1302static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; 1758static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
1303 1759
1760static int task_state_char(unsigned long state)
1761{
1762 int bit = state ? __ffs(state) + 1 : 0;
1763
1764 return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
1765}
1766
1304/* 1767/*
1305 * The message is supposed to contain an ending newline. 1768 * The message is supposed to contain an ending newline.
1306 * If the printing stops prematurely, try to add a newline of our own. 1769 * If the printing stops prematurely, try to add a newline of our own.
@@ -1338,6 +1801,23 @@ void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter)
1338 trace_seq_putc(s, '\n'); 1801 trace_seq_putc(s, '\n');
1339} 1802}
1340 1803
1804static void test_cpu_buff_start(struct trace_iterator *iter)
1805{
1806 struct trace_seq *s = &iter->seq;
1807
1808 if (!(trace_flags & TRACE_ITER_ANNOTATE))
1809 return;
1810
1811 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
1812 return;
1813
1814 if (cpu_isset(iter->cpu, iter->started))
1815 return;
1816
1817 cpu_set(iter->cpu, iter->started);
1818 trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
1819}
1820
1341static enum print_line_t 1821static enum print_line_t
1342print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) 1822print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
1343{ 1823{
@@ -1352,11 +1832,12 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
1352 char *comm; 1832 char *comm;
1353 int S, T; 1833 int S, T;
1354 int i; 1834 int i;
1355 unsigned state;
1356 1835
1357 if (entry->type == TRACE_CONT) 1836 if (entry->type == TRACE_CONT)
1358 return TRACE_TYPE_HANDLED; 1837 return TRACE_TYPE_HANDLED;
1359 1838
1839 test_cpu_buff_start(iter);
1840
1360 next_entry = find_next_entry(iter, NULL, &next_ts); 1841 next_entry = find_next_entry(iter, NULL, &next_ts);
1361 if (!next_entry) 1842 if (!next_entry)
1362 next_ts = iter->ts; 1843 next_ts = iter->ts;
@@ -1396,12 +1877,8 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
1396 1877
1397 trace_assign_type(field, entry); 1878 trace_assign_type(field, entry);
1398 1879
1399 T = field->next_state < sizeof(state_to_char) ? 1880 T = task_state_char(field->next_state);
1400 state_to_char[field->next_state] : 'X'; 1881 S = task_state_char(field->prev_state);
1401
1402 state = field->prev_state ?
1403 __ffs(field->prev_state) + 1 : 0;
1404 S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X';
1405 comm = trace_find_cmdline(field->next_pid); 1882 comm = trace_find_cmdline(field->next_pid);
1406 trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n", 1883 trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
1407 field->prev_pid, 1884 field->prev_pid,
@@ -1448,6 +1925,27 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
1448 trace_seq_print_cont(s, iter); 1925 trace_seq_print_cont(s, iter);
1449 break; 1926 break;
1450 } 1927 }
1928 case TRACE_BRANCH: {
1929 struct trace_branch *field;
1930
1931 trace_assign_type(field, entry);
1932
1933 trace_seq_printf(s, "[%s] %s:%s:%d\n",
1934 field->correct ? " ok " : " MISS ",
1935 field->func,
1936 field->file,
1937 field->line);
1938 break;
1939 }
1940 case TRACE_USER_STACK: {
1941 struct userstack_entry *field;
1942
1943 trace_assign_type(field, entry);
1944
1945 seq_print_userip_objs(field, s, sym_flags);
1946 trace_seq_putc(s, '\n');
1947 break;
1948 }
1451 default: 1949 default:
1452 trace_seq_printf(s, "Unknown type %d\n", entry->type); 1950 trace_seq_printf(s, "Unknown type %d\n", entry->type);
1453 } 1951 }
@@ -1472,6 +1970,8 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
1472 if (entry->type == TRACE_CONT) 1970 if (entry->type == TRACE_CONT)
1473 return TRACE_TYPE_HANDLED; 1971 return TRACE_TYPE_HANDLED;
1474 1972
1973 test_cpu_buff_start(iter);
1974
1475 comm = trace_find_cmdline(iter->ent->pid); 1975 comm = trace_find_cmdline(iter->ent->pid);
1476 1976
1477 t = ns2usecs(iter->ts); 1977 t = ns2usecs(iter->ts);
@@ -1519,10 +2019,8 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
1519 2019
1520 trace_assign_type(field, entry); 2020 trace_assign_type(field, entry);
1521 2021
1522 S = field->prev_state < sizeof(state_to_char) ? 2022 T = task_state_char(field->next_state);
1523 state_to_char[field->prev_state] : 'X'; 2023 S = task_state_char(field->prev_state);
1524 T = field->next_state < sizeof(state_to_char) ?
1525 state_to_char[field->next_state] : 'X';
1526 ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n", 2024 ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n",
1527 field->prev_pid, 2025 field->prev_pid,
1528 field->prev_prio, 2026 field->prev_prio,
@@ -1581,6 +2079,37 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
1581 trace_seq_print_cont(s, iter); 2079 trace_seq_print_cont(s, iter);
1582 break; 2080 break;
1583 } 2081 }
2082 case TRACE_GRAPH_RET: {
2083 return print_graph_function(iter);
2084 }
2085 case TRACE_GRAPH_ENT: {
2086 return print_graph_function(iter);
2087 }
2088 case TRACE_BRANCH: {
2089 struct trace_branch *field;
2090
2091 trace_assign_type(field, entry);
2092
2093 trace_seq_printf(s, "[%s] %s:%s:%d\n",
2094 field->correct ? " ok " : " MISS ",
2095 field->func,
2096 field->file,
2097 field->line);
2098 break;
2099 }
2100 case TRACE_USER_STACK: {
2101 struct userstack_entry *field;
2102
2103 trace_assign_type(field, entry);
2104
2105 ret = seq_print_userip_objs(field, s, sym_flags);
2106 if (!ret)
2107 return TRACE_TYPE_PARTIAL_LINE;
2108 ret = trace_seq_putc(s, '\n');
2109 if (!ret)
2110 return TRACE_TYPE_PARTIAL_LINE;
2111 break;
2112 }
1584 } 2113 }
1585 return TRACE_TYPE_HANDLED; 2114 return TRACE_TYPE_HANDLED;
1586} 2115}
@@ -1621,12 +2150,9 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
1621 2150
1622 trace_assign_type(field, entry); 2151 trace_assign_type(field, entry);
1623 2152
1624 S = field->prev_state < sizeof(state_to_char) ? 2153 T = task_state_char(field->next_state);
1625 state_to_char[field->prev_state] : 'X'; 2154 S = entry->type == TRACE_WAKE ? '+' :
1626 T = field->next_state < sizeof(state_to_char) ? 2155 task_state_char(field->prev_state);
1627 state_to_char[field->next_state] : 'X';
1628 if (entry->type == TRACE_WAKE)
1629 S = '+';
1630 ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n", 2156 ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n",
1631 field->prev_pid, 2157 field->prev_pid,
1632 field->prev_prio, 2158 field->prev_prio,
@@ -1640,6 +2166,7 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
1640 break; 2166 break;
1641 } 2167 }
1642 case TRACE_SPECIAL: 2168 case TRACE_SPECIAL:
2169 case TRACE_USER_STACK:
1643 case TRACE_STACK: { 2170 case TRACE_STACK: {
1644 struct special_entry *field; 2171 struct special_entry *field;
1645 2172
@@ -1712,12 +2239,9 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
1712 2239
1713 trace_assign_type(field, entry); 2240 trace_assign_type(field, entry);
1714 2241
1715 S = field->prev_state < sizeof(state_to_char) ? 2242 T = task_state_char(field->next_state);
1716 state_to_char[field->prev_state] : 'X'; 2243 S = entry->type == TRACE_WAKE ? '+' :
1717 T = field->next_state < sizeof(state_to_char) ? 2244 task_state_char(field->prev_state);
1718 state_to_char[field->next_state] : 'X';
1719 if (entry->type == TRACE_WAKE)
1720 S = '+';
1721 SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid); 2245 SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
1722 SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio); 2246 SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
1723 SEQ_PUT_HEX_FIELD_RET(s, S); 2247 SEQ_PUT_HEX_FIELD_RET(s, S);
@@ -1728,6 +2252,7 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
1728 break; 2252 break;
1729 } 2253 }
1730 case TRACE_SPECIAL: 2254 case TRACE_SPECIAL:
2255 case TRACE_USER_STACK:
1731 case TRACE_STACK: { 2256 case TRACE_STACK: {
1732 struct special_entry *field; 2257 struct special_entry *field;
1733 2258
@@ -1744,6 +2269,25 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
1744 return TRACE_TYPE_HANDLED; 2269 return TRACE_TYPE_HANDLED;
1745} 2270}
1746 2271
2272static enum print_line_t print_printk_msg_only(struct trace_iterator *iter)
2273{
2274 struct trace_seq *s = &iter->seq;
2275 struct trace_entry *entry = iter->ent;
2276 struct print_entry *field;
2277 int ret;
2278
2279 trace_assign_type(field, entry);
2280
2281 ret = trace_seq_printf(s, field->buf);
2282 if (!ret)
2283 return TRACE_TYPE_PARTIAL_LINE;
2284
2285 if (entry->flags & TRACE_FLAG_CONT)
2286 trace_seq_print_cont(s, iter);
2287
2288 return TRACE_TYPE_HANDLED;
2289}
2290
1747static enum print_line_t print_bin_fmt(struct trace_iterator *iter) 2291static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
1748{ 2292{
1749 struct trace_seq *s = &iter->seq; 2293 struct trace_seq *s = &iter->seq;
@@ -1782,6 +2326,7 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
1782 break; 2326 break;
1783 } 2327 }
1784 case TRACE_SPECIAL: 2328 case TRACE_SPECIAL:
2329 case TRACE_USER_STACK:
1785 case TRACE_STACK: { 2330 case TRACE_STACK: {
1786 struct special_entry *field; 2331 struct special_entry *field;
1787 2332
@@ -1823,6 +2368,11 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
1823 return ret; 2368 return ret;
1824 } 2369 }
1825 2370
2371 if (iter->ent->type == TRACE_PRINT &&
2372 trace_flags & TRACE_ITER_PRINTK &&
2373 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2374 return print_printk_msg_only(iter);
2375
1826 if (trace_flags & TRACE_ITER_BIN) 2376 if (trace_flags & TRACE_ITER_BIN)
1827 return print_bin_fmt(iter); 2377 return print_bin_fmt(iter);
1828 2378
@@ -1847,7 +2397,9 @@ static int s_show(struct seq_file *m, void *v)
1847 seq_printf(m, "# tracer: %s\n", iter->trace->name); 2397 seq_printf(m, "# tracer: %s\n", iter->trace->name);
1848 seq_puts(m, "#\n"); 2398 seq_puts(m, "#\n");
1849 } 2399 }
1850 if (iter->iter_flags & TRACE_FILE_LAT_FMT) { 2400 if (iter->trace && iter->trace->print_header)
2401 iter->trace->print_header(m);
2402 else if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
1851 /* print nothing if the buffers are empty */ 2403 /* print nothing if the buffers are empty */
1852 if (trace_empty(iter)) 2404 if (trace_empty(iter))
1853 return 0; 2405 return 0;
@@ -1899,6 +2451,15 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
1899 iter->trace = current_trace; 2451 iter->trace = current_trace;
1900 iter->pos = -1; 2452 iter->pos = -1;
1901 2453
2454 /* Notify the tracer early; before we stop tracing. */
2455 if (iter->trace && iter->trace->open)
2456 iter->trace->open(iter);
2457
2458 /* Annotate start of buffers if we had overruns */
2459 if (ring_buffer_overruns(iter->tr->buffer))
2460 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2461
2462
1902 for_each_tracing_cpu(cpu) { 2463 for_each_tracing_cpu(cpu) {
1903 2464
1904 iter->buffer_iter[cpu] = 2465 iter->buffer_iter[cpu] =
@@ -1917,13 +2478,7 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
1917 m->private = iter; 2478 m->private = iter;
1918 2479
1919 /* stop the trace while dumping */ 2480 /* stop the trace while dumping */
1920 if (iter->tr->ctrl) { 2481 tracing_stop();
1921 tracer_enabled = 0;
1922 ftrace_function_enabled = 0;
1923 }
1924
1925 if (iter->trace && iter->trace->open)
1926 iter->trace->open(iter);
1927 2482
1928 mutex_unlock(&trace_types_lock); 2483 mutex_unlock(&trace_types_lock);
1929 2484
@@ -1966,14 +2521,7 @@ int tracing_release(struct inode *inode, struct file *file)
1966 iter->trace->close(iter); 2521 iter->trace->close(iter);
1967 2522
1968 /* reenable tracing if it was previously enabled */ 2523 /* reenable tracing if it was previously enabled */
1969 if (iter->tr->ctrl) { 2524 tracing_start();
1970 tracer_enabled = 1;
1971 /*
1972 * It is safe to enable function tracing even if it
1973 * isn't used
1974 */
1975 ftrace_function_enabled = 1;
1976 }
1977 mutex_unlock(&trace_types_lock); 2525 mutex_unlock(&trace_types_lock);
1978 2526
1979 seq_release(inode, file); 2527 seq_release(inode, file);
@@ -2151,7 +2699,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2151 if (err) 2699 if (err)
2152 goto err_unlock; 2700 goto err_unlock;
2153 2701
2154 raw_local_irq_disable(); 2702 local_irq_disable();
2155 __raw_spin_lock(&ftrace_max_lock); 2703 __raw_spin_lock(&ftrace_max_lock);
2156 for_each_tracing_cpu(cpu) { 2704 for_each_tracing_cpu(cpu) {
2157 /* 2705 /*
@@ -2168,7 +2716,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2168 } 2716 }
2169 } 2717 }
2170 __raw_spin_unlock(&ftrace_max_lock); 2718 __raw_spin_unlock(&ftrace_max_lock);
2171 raw_local_irq_enable(); 2719 local_irq_enable();
2172 2720
2173 tracing_cpumask = tracing_cpumask_new; 2721 tracing_cpumask = tracing_cpumask_new;
2174 2722
@@ -2189,13 +2737,16 @@ static struct file_operations tracing_cpumask_fops = {
2189}; 2737};
2190 2738
2191static ssize_t 2739static ssize_t
2192tracing_iter_ctrl_read(struct file *filp, char __user *ubuf, 2740tracing_trace_options_read(struct file *filp, char __user *ubuf,
2193 size_t cnt, loff_t *ppos) 2741 size_t cnt, loff_t *ppos)
2194{ 2742{
2743 int i;
2195 char *buf; 2744 char *buf;
2196 int r = 0; 2745 int r = 0;
2197 int len = 0; 2746 int len = 0;
2198 int i; 2747 u32 tracer_flags = current_trace->flags->val;
2748 struct tracer_opt *trace_opts = current_trace->flags->opts;
2749
2199 2750
2200 /* calulate max size */ 2751 /* calulate max size */
2201 for (i = 0; trace_options[i]; i++) { 2752 for (i = 0; trace_options[i]; i++) {
@@ -2203,6 +2754,15 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
2203 len += 3; /* "no" and space */ 2754 len += 3; /* "no" and space */
2204 } 2755 }
2205 2756
2757 /*
2758 * Increase the size with names of options specific
2759 * of the current tracer.
2760 */
2761 for (i = 0; trace_opts[i].name; i++) {
2762 len += strlen(trace_opts[i].name);
2763 len += 3; /* "no" and space */
2764 }
2765
2206 /* +2 for \n and \0 */ 2766 /* +2 for \n and \0 */
2207 buf = kmalloc(len + 2, GFP_KERNEL); 2767 buf = kmalloc(len + 2, GFP_KERNEL);
2208 if (!buf) 2768 if (!buf)
@@ -2215,6 +2775,15 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
2215 r += sprintf(buf + r, "no%s ", trace_options[i]); 2775 r += sprintf(buf + r, "no%s ", trace_options[i]);
2216 } 2776 }
2217 2777
2778 for (i = 0; trace_opts[i].name; i++) {
2779 if (tracer_flags & trace_opts[i].bit)
2780 r += sprintf(buf + r, "%s ",
2781 trace_opts[i].name);
2782 else
2783 r += sprintf(buf + r, "no%s ",
2784 trace_opts[i].name);
2785 }
2786
2218 r += sprintf(buf + r, "\n"); 2787 r += sprintf(buf + r, "\n");
2219 WARN_ON(r >= len + 2); 2788 WARN_ON(r >= len + 2);
2220 2789
@@ -2225,13 +2794,48 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
2225 return r; 2794 return r;
2226} 2795}
2227 2796
2797/* Try to assign a tracer specific option */
2798static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
2799{
2800 struct tracer_flags *trace_flags = trace->flags;
2801 struct tracer_opt *opts = NULL;
2802 int ret = 0, i = 0;
2803 int len;
2804
2805 for (i = 0; trace_flags->opts[i].name; i++) {
2806 opts = &trace_flags->opts[i];
2807 len = strlen(opts->name);
2808
2809 if (strncmp(cmp, opts->name, len) == 0) {
2810 ret = trace->set_flag(trace_flags->val,
2811 opts->bit, !neg);
2812 break;
2813 }
2814 }
2815 /* Not found */
2816 if (!trace_flags->opts[i].name)
2817 return -EINVAL;
2818
2819 /* Refused to handle */
2820 if (ret)
2821 return ret;
2822
2823 if (neg)
2824 trace_flags->val &= ~opts->bit;
2825 else
2826 trace_flags->val |= opts->bit;
2827
2828 return 0;
2829}
2830
2228static ssize_t 2831static ssize_t
2229tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf, 2832tracing_trace_options_write(struct file *filp, const char __user *ubuf,
2230 size_t cnt, loff_t *ppos) 2833 size_t cnt, loff_t *ppos)
2231{ 2834{
2232 char buf[64]; 2835 char buf[64];
2233 char *cmp = buf; 2836 char *cmp = buf;
2234 int neg = 0; 2837 int neg = 0;
2838 int ret;
2235 int i; 2839 int i;
2236 2840
2237 if (cnt >= sizeof(buf)) 2841 if (cnt >= sizeof(buf))
@@ -2258,11 +2862,13 @@ tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
2258 break; 2862 break;
2259 } 2863 }
2260 } 2864 }
2261 /* 2865
2262 * If no option could be set, return an error: 2866 /* If no option could be set, test the specific tracer options */
2263 */ 2867 if (!trace_options[i]) {
2264 if (!trace_options[i]) 2868 ret = set_tracer_option(current_trace, cmp, neg);
2265 return -EINVAL; 2869 if (ret)
2870 return ret;
2871 }
2266 2872
2267 filp->f_pos += cnt; 2873 filp->f_pos += cnt;
2268 2874
@@ -2271,8 +2877,8 @@ tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
2271 2877
2272static struct file_operations tracing_iter_fops = { 2878static struct file_operations tracing_iter_fops = {
2273 .open = tracing_open_generic, 2879 .open = tracing_open_generic,
2274 .read = tracing_iter_ctrl_read, 2880 .read = tracing_trace_options_read,
2275 .write = tracing_iter_ctrl_write, 2881 .write = tracing_trace_options_write,
2276}; 2882};
2277 2883
2278static const char readme_msg[] = 2884static const char readme_msg[] =
@@ -2286,9 +2892,9 @@ static const char readme_msg[] =
2286 "# echo sched_switch > /debug/tracing/current_tracer\n" 2892 "# echo sched_switch > /debug/tracing/current_tracer\n"
2287 "# cat /debug/tracing/current_tracer\n" 2893 "# cat /debug/tracing/current_tracer\n"
2288 "sched_switch\n" 2894 "sched_switch\n"
2289 "# cat /debug/tracing/iter_ctrl\n" 2895 "# cat /debug/tracing/trace_options\n"
2290 "noprint-parent nosym-offset nosym-addr noverbose\n" 2896 "noprint-parent nosym-offset nosym-addr noverbose\n"
2291 "# echo print-parent > /debug/tracing/iter_ctrl\n" 2897 "# echo print-parent > /debug/tracing/trace_options\n"
2292 "# echo 1 > /debug/tracing/tracing_enabled\n" 2898 "# echo 1 > /debug/tracing/tracing_enabled\n"
2293 "# cat /debug/tracing/trace > /tmp/trace.txt\n" 2899 "# cat /debug/tracing/trace > /tmp/trace.txt\n"
2294 "echo 0 > /debug/tracing/tracing_enabled\n" 2900 "echo 0 > /debug/tracing/tracing_enabled\n"
@@ -2311,11 +2917,10 @@ static ssize_t
2311tracing_ctrl_read(struct file *filp, char __user *ubuf, 2917tracing_ctrl_read(struct file *filp, char __user *ubuf,
2312 size_t cnt, loff_t *ppos) 2918 size_t cnt, loff_t *ppos)
2313{ 2919{
2314 struct trace_array *tr = filp->private_data;
2315 char buf[64]; 2920 char buf[64];
2316 int r; 2921 int r;
2317 2922
2318 r = sprintf(buf, "%ld\n", tr->ctrl); 2923 r = sprintf(buf, "%u\n", tracer_enabled);
2319 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 2924 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2320} 2925}
2321 2926
@@ -2343,16 +2948,18 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf,
2343 val = !!val; 2948 val = !!val;
2344 2949
2345 mutex_lock(&trace_types_lock); 2950 mutex_lock(&trace_types_lock);
2346 if (tr->ctrl ^ val) { 2951 if (tracer_enabled ^ val) {
2347 if (val) 2952 if (val) {
2348 tracer_enabled = 1; 2953 tracer_enabled = 1;
2349 else 2954 if (current_trace->start)
2955 current_trace->start(tr);
2956 tracing_start();
2957 } else {
2350 tracer_enabled = 0; 2958 tracer_enabled = 0;
2351 2959 tracing_stop();
2352 tr->ctrl = val; 2960 if (current_trace->stop)
2353 2961 current_trace->stop(tr);
2354 if (current_trace && current_trace->ctrl_update) 2962 }
2355 current_trace->ctrl_update(tr);
2356 } 2963 }
2357 mutex_unlock(&trace_types_lock); 2964 mutex_unlock(&trace_types_lock);
2358 2965
@@ -2378,29 +2985,11 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf,
2378 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 2985 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2379} 2986}
2380 2987
2381static ssize_t 2988static int tracing_set_tracer(char *buf)
2382tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2383 size_t cnt, loff_t *ppos)
2384{ 2989{
2385 struct trace_array *tr = &global_trace; 2990 struct trace_array *tr = &global_trace;
2386 struct tracer *t; 2991 struct tracer *t;
2387 char buf[max_tracer_type_len+1]; 2992 int ret = 0;
2388 int i;
2389 size_t ret;
2390
2391 ret = cnt;
2392
2393 if (cnt > max_tracer_type_len)
2394 cnt = max_tracer_type_len;
2395
2396 if (copy_from_user(&buf, ubuf, cnt))
2397 return -EFAULT;
2398
2399 buf[cnt] = 0;
2400
2401 /* strip ending whitespace. */
2402 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
2403 buf[i] = 0;
2404 2993
2405 mutex_lock(&trace_types_lock); 2994 mutex_lock(&trace_types_lock);
2406 for (t = trace_types; t; t = t->next) { 2995 for (t = trace_types; t; t = t->next) {
@@ -2414,18 +3003,52 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2414 if (t == current_trace) 3003 if (t == current_trace)
2415 goto out; 3004 goto out;
2416 3005
3006 trace_branch_disable();
2417 if (current_trace && current_trace->reset) 3007 if (current_trace && current_trace->reset)
2418 current_trace->reset(tr); 3008 current_trace->reset(tr);
2419 3009
2420 current_trace = t; 3010 current_trace = t;
2421 if (t->init) 3011 if (t->init) {
2422 t->init(tr); 3012 ret = t->init(tr);
3013 if (ret)
3014 goto out;
3015 }
2423 3016
3017 trace_branch_enable(tr);
2424 out: 3018 out:
2425 mutex_unlock(&trace_types_lock); 3019 mutex_unlock(&trace_types_lock);
2426 3020
2427 if (ret > 0) 3021 return ret;
2428 filp->f_pos += ret; 3022}
3023
3024static ssize_t
3025tracing_set_trace_write(struct file *filp, const char __user *ubuf,
3026 size_t cnt, loff_t *ppos)
3027{
3028 char buf[max_tracer_type_len+1];
3029 int i;
3030 size_t ret;
3031 int err;
3032
3033 ret = cnt;
3034
3035 if (cnt > max_tracer_type_len)
3036 cnt = max_tracer_type_len;
3037
3038 if (copy_from_user(&buf, ubuf, cnt))
3039 return -EFAULT;
3040
3041 buf[cnt] = 0;
3042
3043 /* strip ending whitespace. */
3044 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
3045 buf[i] = 0;
3046
3047 err = tracing_set_tracer(buf);
3048 if (err)
3049 return err;
3050
3051 filp->f_pos += ret;
2429 3052
2430 return ret; 3053 return ret;
2431} 3054}
@@ -2492,6 +3115,10 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
2492 return -ENOMEM; 3115 return -ENOMEM;
2493 3116
2494 mutex_lock(&trace_types_lock); 3117 mutex_lock(&trace_types_lock);
3118
3119 /* trace pipe does not show start of buffer */
3120 cpus_setall(iter->started);
3121
2495 iter->tr = &global_trace; 3122 iter->tr = &global_trace;
2496 iter->trace = current_trace; 3123 iter->trace = current_trace;
2497 filp->private_data = iter; 3124 filp->private_data = iter;
@@ -2667,7 +3294,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
2667 char buf[64]; 3294 char buf[64];
2668 int r; 3295 int r;
2669 3296
2670 r = sprintf(buf, "%lu\n", tr->entries); 3297 r = sprintf(buf, "%lu\n", tr->entries >> 10);
2671 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 3298 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2672} 3299}
2673 3300
@@ -2678,7 +3305,6 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2678 unsigned long val; 3305 unsigned long val;
2679 char buf[64]; 3306 char buf[64];
2680 int ret, cpu; 3307 int ret, cpu;
2681 struct trace_array *tr = filp->private_data;
2682 3308
2683 if (cnt >= sizeof(buf)) 3309 if (cnt >= sizeof(buf))
2684 return -EINVAL; 3310 return -EINVAL;
@@ -2698,12 +3324,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2698 3324
2699 mutex_lock(&trace_types_lock); 3325 mutex_lock(&trace_types_lock);
2700 3326
2701 if (tr->ctrl) { 3327 tracing_stop();
2702 cnt = -EBUSY;
2703 pr_info("ftrace: please disable tracing"
2704 " before modifying buffer size\n");
2705 goto out;
2706 }
2707 3328
2708 /* disable all cpu buffers */ 3329 /* disable all cpu buffers */
2709 for_each_tracing_cpu(cpu) { 3330 for_each_tracing_cpu(cpu) {
@@ -2713,6 +3334,9 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2713 atomic_inc(&max_tr.data[cpu]->disabled); 3334 atomic_inc(&max_tr.data[cpu]->disabled);
2714 } 3335 }
2715 3336
3337 /* value is in KB */
3338 val <<= 10;
3339
2716 if (val != global_trace.entries) { 3340 if (val != global_trace.entries) {
2717 ret = ring_buffer_resize(global_trace.buffer, val); 3341 ret = ring_buffer_resize(global_trace.buffer, val);
2718 if (ret < 0) { 3342 if (ret < 0) {
@@ -2751,6 +3375,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2751 atomic_dec(&max_tr.data[cpu]->disabled); 3375 atomic_dec(&max_tr.data[cpu]->disabled);
2752 } 3376 }
2753 3377
3378 tracing_start();
2754 max_tr.entries = global_trace.entries; 3379 max_tr.entries = global_trace.entries;
2755 mutex_unlock(&trace_types_lock); 3380 mutex_unlock(&trace_types_lock);
2756 3381
@@ -2762,7 +3387,7 @@ static int mark_printk(const char *fmt, ...)
2762 int ret; 3387 int ret;
2763 va_list args; 3388 va_list args;
2764 va_start(args, fmt); 3389 va_start(args, fmt);
2765 ret = trace_vprintk(0, fmt, args); 3390 ret = trace_vprintk(0, -1, fmt, args);
2766 va_end(args); 3391 va_end(args);
2767 return ret; 3392 return ret;
2768} 3393}
@@ -2773,9 +3398,8 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
2773{ 3398{
2774 char *buf; 3399 char *buf;
2775 char *end; 3400 char *end;
2776 struct trace_array *tr = &global_trace;
2777 3401
2778 if (!tr->ctrl || tracing_disabled) 3402 if (tracing_disabled)
2779 return -EINVAL; 3403 return -EINVAL;
2780 3404
2781 if (cnt > TRACE_BUF_SIZE) 3405 if (cnt > TRACE_BUF_SIZE)
@@ -2841,22 +3465,38 @@ static struct file_operations tracing_mark_fops = {
2841 3465
2842#ifdef CONFIG_DYNAMIC_FTRACE 3466#ifdef CONFIG_DYNAMIC_FTRACE
2843 3467
3468int __weak ftrace_arch_read_dyn_info(char *buf, int size)
3469{
3470 return 0;
3471}
3472
2844static ssize_t 3473static ssize_t
2845tracing_read_long(struct file *filp, char __user *ubuf, 3474tracing_read_dyn_info(struct file *filp, char __user *ubuf,
2846 size_t cnt, loff_t *ppos) 3475 size_t cnt, loff_t *ppos)
2847{ 3476{
3477 static char ftrace_dyn_info_buffer[1024];
3478 static DEFINE_MUTEX(dyn_info_mutex);
2848 unsigned long *p = filp->private_data; 3479 unsigned long *p = filp->private_data;
2849 char buf[64]; 3480 char *buf = ftrace_dyn_info_buffer;
3481 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
2850 int r; 3482 int r;
2851 3483
2852 r = sprintf(buf, "%ld\n", *p); 3484 mutex_lock(&dyn_info_mutex);
3485 r = sprintf(buf, "%ld ", *p);
2853 3486
2854 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 3487 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
3488 buf[r++] = '\n';
3489
3490 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3491
3492 mutex_unlock(&dyn_info_mutex);
3493
3494 return r;
2855} 3495}
2856 3496
2857static struct file_operations tracing_read_long_fops = { 3497static struct file_operations tracing_dyn_info_fops = {
2858 .open = tracing_open_generic, 3498 .open = tracing_open_generic,
2859 .read = tracing_read_long, 3499 .read = tracing_read_dyn_info,
2860}; 3500};
2861#endif 3501#endif
2862 3502
@@ -2897,10 +3537,10 @@ static __init int tracer_init_debugfs(void)
2897 if (!entry) 3537 if (!entry)
2898 pr_warning("Could not create debugfs 'tracing_enabled' entry\n"); 3538 pr_warning("Could not create debugfs 'tracing_enabled' entry\n");
2899 3539
2900 entry = debugfs_create_file("iter_ctrl", 0644, d_tracer, 3540 entry = debugfs_create_file("trace_options", 0644, d_tracer,
2901 NULL, &tracing_iter_fops); 3541 NULL, &tracing_iter_fops);
2902 if (!entry) 3542 if (!entry)
2903 pr_warning("Could not create debugfs 'iter_ctrl' entry\n"); 3543 pr_warning("Could not create debugfs 'trace_options' entry\n");
2904 3544
2905 entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer, 3545 entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
2906 NULL, &tracing_cpumask_fops); 3546 NULL, &tracing_cpumask_fops);
@@ -2950,11 +3590,11 @@ static __init int tracer_init_debugfs(void)
2950 pr_warning("Could not create debugfs " 3590 pr_warning("Could not create debugfs "
2951 "'trace_pipe' entry\n"); 3591 "'trace_pipe' entry\n");
2952 3592
2953 entry = debugfs_create_file("trace_entries", 0644, d_tracer, 3593 entry = debugfs_create_file("buffer_size_kb", 0644, d_tracer,
2954 &global_trace, &tracing_entries_fops); 3594 &global_trace, &tracing_entries_fops);
2955 if (!entry) 3595 if (!entry)
2956 pr_warning("Could not create debugfs " 3596 pr_warning("Could not create debugfs "
2957 "'trace_entries' entry\n"); 3597 "'buffer_size_kb' entry\n");
2958 3598
2959 entry = debugfs_create_file("trace_marker", 0220, d_tracer, 3599 entry = debugfs_create_file("trace_marker", 0220, d_tracer,
2960 NULL, &tracing_mark_fops); 3600 NULL, &tracing_mark_fops);
@@ -2965,7 +3605,7 @@ static __init int tracer_init_debugfs(void)
2965#ifdef CONFIG_DYNAMIC_FTRACE 3605#ifdef CONFIG_DYNAMIC_FTRACE
2966 entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer, 3606 entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
2967 &ftrace_update_tot_cnt, 3607 &ftrace_update_tot_cnt,
2968 &tracing_read_long_fops); 3608 &tracing_dyn_info_fops);
2969 if (!entry) 3609 if (!entry)
2970 pr_warning("Could not create debugfs " 3610 pr_warning("Could not create debugfs "
2971 "'dyn_ftrace_total_info' entry\n"); 3611 "'dyn_ftrace_total_info' entry\n");
@@ -2976,7 +3616,7 @@ static __init int tracer_init_debugfs(void)
2976 return 0; 3616 return 0;
2977} 3617}
2978 3618
2979int trace_vprintk(unsigned long ip, const char *fmt, va_list args) 3619int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
2980{ 3620{
2981 static DEFINE_SPINLOCK(trace_buf_lock); 3621 static DEFINE_SPINLOCK(trace_buf_lock);
2982 static char trace_buf[TRACE_BUF_SIZE]; 3622 static char trace_buf[TRACE_BUF_SIZE];
@@ -2984,11 +3624,11 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2984 struct ring_buffer_event *event; 3624 struct ring_buffer_event *event;
2985 struct trace_array *tr = &global_trace; 3625 struct trace_array *tr = &global_trace;
2986 struct trace_array_cpu *data; 3626 struct trace_array_cpu *data;
2987 struct print_entry *entry;
2988 unsigned long flags, irq_flags;
2989 int cpu, len = 0, size, pc; 3627 int cpu, len = 0, size, pc;
3628 struct print_entry *entry;
3629 unsigned long irq_flags;
2990 3630
2991 if (!tr->ctrl || tracing_disabled) 3631 if (tracing_disabled || tracing_selftest_running)
2992 return 0; 3632 return 0;
2993 3633
2994 pc = preempt_count(); 3634 pc = preempt_count();
@@ -2999,7 +3639,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2999 if (unlikely(atomic_read(&data->disabled))) 3639 if (unlikely(atomic_read(&data->disabled)))
3000 goto out; 3640 goto out;
3001 3641
3002 spin_lock_irqsave(&trace_buf_lock, flags); 3642 pause_graph_tracing();
3643 spin_lock_irqsave(&trace_buf_lock, irq_flags);
3003 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); 3644 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
3004 3645
3005 len = min(len, TRACE_BUF_SIZE-1); 3646 len = min(len, TRACE_BUF_SIZE-1);
@@ -3010,17 +3651,18 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3010 if (!event) 3651 if (!event)
3011 goto out_unlock; 3652 goto out_unlock;
3012 entry = ring_buffer_event_data(event); 3653 entry = ring_buffer_event_data(event);
3013 tracing_generic_entry_update(&entry->ent, flags, pc); 3654 tracing_generic_entry_update(&entry->ent, irq_flags, pc);
3014 entry->ent.type = TRACE_PRINT; 3655 entry->ent.type = TRACE_PRINT;
3015 entry->ip = ip; 3656 entry->ip = ip;
3657 entry->depth = depth;
3016 3658
3017 memcpy(&entry->buf, trace_buf, len); 3659 memcpy(&entry->buf, trace_buf, len);
3018 entry->buf[len] = 0; 3660 entry->buf[len] = 0;
3019 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 3661 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
3020 3662
3021 out_unlock: 3663 out_unlock:
3022 spin_unlock_irqrestore(&trace_buf_lock, flags); 3664 spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
3023 3665 unpause_graph_tracing();
3024 out: 3666 out:
3025 preempt_enable_notrace(); 3667 preempt_enable_notrace();
3026 3668
@@ -3037,7 +3679,7 @@ int __ftrace_printk(unsigned long ip, const char *fmt, ...)
3037 return 0; 3679 return 0;
3038 3680
3039 va_start(ap, fmt); 3681 va_start(ap, fmt);
3040 ret = trace_vprintk(ip, fmt, ap); 3682 ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
3041 va_end(ap); 3683 va_end(ap);
3042 return ret; 3684 return ret;
3043} 3685}
@@ -3046,7 +3688,8 @@ EXPORT_SYMBOL_GPL(__ftrace_printk);
3046static int trace_panic_handler(struct notifier_block *this, 3688static int trace_panic_handler(struct notifier_block *this,
3047 unsigned long event, void *unused) 3689 unsigned long event, void *unused)
3048{ 3690{
3049 ftrace_dump(); 3691 if (ftrace_dump_on_oops)
3692 ftrace_dump();
3050 return NOTIFY_OK; 3693 return NOTIFY_OK;
3051} 3694}
3052 3695
@@ -3062,7 +3705,8 @@ static int trace_die_handler(struct notifier_block *self,
3062{ 3705{
3063 switch (val) { 3706 switch (val) {
3064 case DIE_OOPS: 3707 case DIE_OOPS:
3065 ftrace_dump(); 3708 if (ftrace_dump_on_oops)
3709 ftrace_dump();
3066 break; 3710 break;
3067 default: 3711 default:
3068 break; 3712 break;
@@ -3103,7 +3747,6 @@ trace_printk_seq(struct trace_seq *s)
3103 trace_seq_reset(s); 3747 trace_seq_reset(s);
3104} 3748}
3105 3749
3106
3107void ftrace_dump(void) 3750void ftrace_dump(void)
3108{ 3751{
3109 static DEFINE_SPINLOCK(ftrace_dump_lock); 3752 static DEFINE_SPINLOCK(ftrace_dump_lock);
@@ -3128,6 +3771,9 @@ void ftrace_dump(void)
3128 atomic_inc(&global_trace.data[cpu]->disabled); 3771 atomic_inc(&global_trace.data[cpu]->disabled);
3129 } 3772 }
3130 3773
3774 /* don't look at user memory in panic mode */
3775 trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
3776
3131 printk(KERN_TRACE "Dumping ftrace buffer:\n"); 3777 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3132 3778
3133 iter.tr = &global_trace; 3779 iter.tr = &global_trace;
@@ -3221,7 +3867,6 @@ __init static int tracer_alloc_buffers(void)
3221#endif 3867#endif
3222 3868
3223 /* All seems OK, enable tracing */ 3869 /* All seems OK, enable tracing */
3224 global_trace.ctrl = tracer_enabled;
3225 tracing_disabled = 0; 3870 tracing_disabled = 0;
3226 3871
3227 atomic_notifier_chain_register(&panic_notifier_list, 3872 atomic_notifier_chain_register(&panic_notifier_list,
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 8465ad052707..cc7a4f864036 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -8,6 +8,7 @@
8#include <linux/ring_buffer.h> 8#include <linux/ring_buffer.h>
9#include <linux/mmiotrace.h> 9#include <linux/mmiotrace.h>
10#include <linux/ftrace.h> 10#include <linux/ftrace.h>
11#include <trace/boot.h>
11 12
12enum trace_type { 13enum trace_type {
13 __TRACE_FIRST_TYPE = 0, 14 __TRACE_FIRST_TYPE = 0,
@@ -21,7 +22,14 @@ enum trace_type {
21 TRACE_SPECIAL, 22 TRACE_SPECIAL,
22 TRACE_MMIO_RW, 23 TRACE_MMIO_RW,
23 TRACE_MMIO_MAP, 24 TRACE_MMIO_MAP,
24 TRACE_BOOT, 25 TRACE_BRANCH,
26 TRACE_BOOT_CALL,
27 TRACE_BOOT_RET,
28 TRACE_GRAPH_RET,
29 TRACE_GRAPH_ENT,
30 TRACE_USER_STACK,
31 TRACE_HW_BRANCHES,
32 TRACE_POWER,
25 33
26 __TRACE_LAST_TYPE 34 __TRACE_LAST_TYPE
27}; 35};
@@ -38,6 +46,7 @@ struct trace_entry {
38 unsigned char flags; 46 unsigned char flags;
39 unsigned char preempt_count; 47 unsigned char preempt_count;
40 int pid; 48 int pid;
49 int tgid;
41}; 50};
42 51
43/* 52/*
@@ -48,6 +57,18 @@ struct ftrace_entry {
48 unsigned long ip; 57 unsigned long ip;
49 unsigned long parent_ip; 58 unsigned long parent_ip;
50}; 59};
60
61/* Function call entry */
62struct ftrace_graph_ent_entry {
63 struct trace_entry ent;
64 struct ftrace_graph_ent graph_ent;
65};
66
67/* Function return entry */
68struct ftrace_graph_ret_entry {
69 struct trace_entry ent;
70 struct ftrace_graph_ret ret;
71};
51extern struct tracer boot_tracer; 72extern struct tracer boot_tracer;
52 73
53/* 74/*
@@ -85,12 +106,18 @@ struct stack_entry {
85 unsigned long caller[FTRACE_STACK_ENTRIES]; 106 unsigned long caller[FTRACE_STACK_ENTRIES];
86}; 107};
87 108
109struct userstack_entry {
110 struct trace_entry ent;
111 unsigned long caller[FTRACE_STACK_ENTRIES];
112};
113
88/* 114/*
89 * ftrace_printk entry: 115 * ftrace_printk entry:
90 */ 116 */
91struct print_entry { 117struct print_entry {
92 struct trace_entry ent; 118 struct trace_entry ent;
93 unsigned long ip; 119 unsigned long ip;
120 int depth;
94 char buf[]; 121 char buf[];
95}; 122};
96 123
@@ -112,9 +139,35 @@ struct trace_mmiotrace_map {
112 struct mmiotrace_map map; 139 struct mmiotrace_map map;
113}; 140};
114 141
115struct trace_boot { 142struct trace_boot_call {
116 struct trace_entry ent; 143 struct trace_entry ent;
117 struct boot_trace initcall; 144 struct boot_trace_call boot_call;
145};
146
147struct trace_boot_ret {
148 struct trace_entry ent;
149 struct boot_trace_ret boot_ret;
150};
151
152#define TRACE_FUNC_SIZE 30
153#define TRACE_FILE_SIZE 20
154struct trace_branch {
155 struct trace_entry ent;
156 unsigned line;
157 char func[TRACE_FUNC_SIZE+1];
158 char file[TRACE_FILE_SIZE+1];
159 char correct;
160};
161
162struct hw_branch_entry {
163 struct trace_entry ent;
164 u64 from;
165 u64 to;
166};
167
168struct trace_power {
169 struct trace_entry ent;
170 struct power_trace state_data;
118}; 171};
119 172
120/* 173/*
@@ -172,7 +225,6 @@ struct trace_iterator;
172struct trace_array { 225struct trace_array {
173 struct ring_buffer *buffer; 226 struct ring_buffer *buffer;
174 unsigned long entries; 227 unsigned long entries;
175 long ctrl;
176 int cpu; 228 int cpu;
177 cycle_t time_start; 229 cycle_t time_start;
178 struct task_struct *waiter; 230 struct task_struct *waiter;
@@ -212,13 +264,22 @@ extern void __ftrace_bad_type(void);
212 IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \ 264 IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \
213 IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \ 265 IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
214 IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \ 266 IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \
267 IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
215 IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \ 268 IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \
216 IF_ASSIGN(var, ent, struct special_entry, 0); \ 269 IF_ASSIGN(var, ent, struct special_entry, 0); \
217 IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \ 270 IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
218 TRACE_MMIO_RW); \ 271 TRACE_MMIO_RW); \
219 IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \ 272 IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \
220 TRACE_MMIO_MAP); \ 273 TRACE_MMIO_MAP); \
221 IF_ASSIGN(var, ent, struct trace_boot, TRACE_BOOT); \ 274 IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
275 IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
276 IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
277 IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \
278 TRACE_GRAPH_ENT); \
279 IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
280 TRACE_GRAPH_RET); \
281 IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
282 IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \
222 __ftrace_bad_type(); \ 283 __ftrace_bad_type(); \
223 } while (0) 284 } while (0)
224 285
@@ -229,29 +290,56 @@ enum print_line_t {
229 TRACE_TYPE_UNHANDLED = 2 /* Relay to other output functions */ 290 TRACE_TYPE_UNHANDLED = 2 /* Relay to other output functions */
230}; 291};
231 292
293
294/*
295 * An option specific to a tracer. This is a boolean value.
296 * The bit is the bit index that sets its value on the
297 * flags value in struct tracer_flags.
298 */
299struct tracer_opt {
300 const char *name; /* Will appear on the trace_options file */
301 u32 bit; /* Mask assigned in val field in tracer_flags */
302};
303
304/*
305 * The set of specific options for a tracer. Your tracer
306 * have to set the initial value of the flags val.
307 */
308struct tracer_flags {
309 u32 val;
310 struct tracer_opt *opts;
311};
312
313/* Makes more easy to define a tracer opt */
314#define TRACER_OPT(s, b) .name = #s, .bit = b
315
232/* 316/*
233 * A specific tracer, represented by methods that operate on a trace array: 317 * A specific tracer, represented by methods that operate on a trace array:
234 */ 318 */
235struct tracer { 319struct tracer {
236 const char *name; 320 const char *name;
237 void (*init)(struct trace_array *tr); 321 /* Your tracer should raise a warning if init fails */
322 int (*init)(struct trace_array *tr);
238 void (*reset)(struct trace_array *tr); 323 void (*reset)(struct trace_array *tr);
324 void (*start)(struct trace_array *tr);
325 void (*stop)(struct trace_array *tr);
239 void (*open)(struct trace_iterator *iter); 326 void (*open)(struct trace_iterator *iter);
240 void (*pipe_open)(struct trace_iterator *iter); 327 void (*pipe_open)(struct trace_iterator *iter);
241 void (*close)(struct trace_iterator *iter); 328 void (*close)(struct trace_iterator *iter);
242 void (*start)(struct trace_iterator *iter);
243 void (*stop)(struct trace_iterator *iter);
244 ssize_t (*read)(struct trace_iterator *iter, 329 ssize_t (*read)(struct trace_iterator *iter,
245 struct file *filp, char __user *ubuf, 330 struct file *filp, char __user *ubuf,
246 size_t cnt, loff_t *ppos); 331 size_t cnt, loff_t *ppos);
247 void (*ctrl_update)(struct trace_array *tr);
248#ifdef CONFIG_FTRACE_STARTUP_TEST 332#ifdef CONFIG_FTRACE_STARTUP_TEST
249 int (*selftest)(struct tracer *trace, 333 int (*selftest)(struct tracer *trace,
250 struct trace_array *tr); 334 struct trace_array *tr);
251#endif 335#endif
336 void (*print_header)(struct seq_file *m);
252 enum print_line_t (*print_line)(struct trace_iterator *iter); 337 enum print_line_t (*print_line)(struct trace_iterator *iter);
338 /* If you handled the flag setting, return 0 */
339 int (*set_flag)(u32 old_flags, u32 bit, int set);
253 struct tracer *next; 340 struct tracer *next;
254 int print_max; 341 int print_max;
342 struct tracer_flags *flags;
255}; 343};
256 344
257struct trace_seq { 345struct trace_seq {
@@ -279,10 +367,14 @@ struct trace_iterator {
279 unsigned long iter_flags; 367 unsigned long iter_flags;
280 loff_t pos; 368 loff_t pos;
281 long idx; 369 long idx;
370
371 cpumask_t started;
282}; 372};
283 373
374int tracing_is_enabled(void);
284void trace_wake_up(void); 375void trace_wake_up(void);
285void tracing_reset(struct trace_array *tr, int cpu); 376void tracing_reset(struct trace_array *tr, int cpu);
377void tracing_reset_online_cpus(struct trace_array *tr);
286int tracing_open_generic(struct inode *inode, struct file *filp); 378int tracing_open_generic(struct inode *inode, struct file *filp);
287struct dentry *tracing_init_dentry(void); 379struct dentry *tracing_init_dentry(void);
288void init_tracer_sysprof_debugfs(struct dentry *d_tracer); 380void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
@@ -321,8 +413,15 @@ void trace_function(struct trace_array *tr,
321 unsigned long parent_ip, 413 unsigned long parent_ip,
322 unsigned long flags, int pc); 414 unsigned long flags, int pc);
323 415
416void trace_graph_return(struct ftrace_graph_ret *trace);
417int trace_graph_entry(struct ftrace_graph_ent *trace);
418void trace_hw_branch(struct trace_array *tr, u64 from, u64 to);
419
324void tracing_start_cmdline_record(void); 420void tracing_start_cmdline_record(void);
325void tracing_stop_cmdline_record(void); 421void tracing_stop_cmdline_record(void);
422void tracing_sched_switch_assign_trace(struct trace_array *tr);
423void tracing_stop_sched_switch_record(void);
424void tracing_start_sched_switch_record(void);
326int register_tracer(struct tracer *type); 425int register_tracer(struct tracer *type);
327void unregister_tracer(struct tracer *type); 426void unregister_tracer(struct tracer *type);
328 427
@@ -358,6 +457,7 @@ struct tracer_switch_ops {
358 struct tracer_switch_ops *next; 457 struct tracer_switch_ops *next;
359}; 458};
360 459
460char *trace_find_cmdline(int pid);
361#endif /* CONFIG_CONTEXT_SWITCH_TRACER */ 461#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
362 462
363#ifdef CONFIG_DYNAMIC_FTRACE 463#ifdef CONFIG_DYNAMIC_FTRACE
@@ -383,19 +483,79 @@ extern int trace_selftest_startup_sched_switch(struct tracer *trace,
383 struct trace_array *tr); 483 struct trace_array *tr);
384extern int trace_selftest_startup_sysprof(struct tracer *trace, 484extern int trace_selftest_startup_sysprof(struct tracer *trace,
385 struct trace_array *tr); 485 struct trace_array *tr);
486extern int trace_selftest_startup_branch(struct tracer *trace,
487 struct trace_array *tr);
386#endif /* CONFIG_FTRACE_STARTUP_TEST */ 488#endif /* CONFIG_FTRACE_STARTUP_TEST */
387 489
388extern void *head_page(struct trace_array_cpu *data); 490extern void *head_page(struct trace_array_cpu *data);
389extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...); 491extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
390extern void trace_seq_print_cont(struct trace_seq *s, 492extern void trace_seq_print_cont(struct trace_seq *s,
391 struct trace_iterator *iter); 493 struct trace_iterator *iter);
494
495extern int
496seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
497 unsigned long sym_flags);
392extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, 498extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
393 size_t cnt); 499 size_t cnt);
394extern long ns2usecs(cycle_t nsec); 500extern long ns2usecs(cycle_t nsec);
395extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args); 501extern int
502trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args);
396 503
397extern unsigned long trace_flags; 504extern unsigned long trace_flags;
398 505
506/* Standard output formatting function used for function return traces */
507#ifdef CONFIG_FUNCTION_GRAPH_TRACER
508extern enum print_line_t print_graph_function(struct trace_iterator *iter);
509
510#ifdef CONFIG_DYNAMIC_FTRACE
511/* TODO: make this variable */
512#define FTRACE_GRAPH_MAX_FUNCS 32
513extern int ftrace_graph_count;
514extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
515
516static inline int ftrace_graph_addr(unsigned long addr)
517{
518 int i;
519
520 if (!ftrace_graph_count || test_tsk_trace_graph(current))
521 return 1;
522
523 for (i = 0; i < ftrace_graph_count; i++) {
524 if (addr == ftrace_graph_funcs[i])
525 return 1;
526 }
527
528 return 0;
529}
530#else
531static inline int ftrace_trace_addr(unsigned long addr)
532{
533 return 1;
534}
535static inline int ftrace_graph_addr(unsigned long addr)
536{
537 return 1;
538}
539#endif /* CONFIG_DYNAMIC_FTRACE */
540
541#else /* CONFIG_FUNCTION_GRAPH_TRACER */
542static inline enum print_line_t
543print_graph_function(struct trace_iterator *iter)
544{
545 return TRACE_TYPE_UNHANDLED;
546}
547#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
548
549extern struct pid *ftrace_pid_trace;
550
551static inline int ftrace_trace_task(struct task_struct *task)
552{
553 if (!ftrace_pid_trace)
554 return 1;
555
556 return test_tsk_trace_trace(task);
557}
558
399/* 559/*
400 * trace_iterator_flags is an enumeration that defines bit 560 * trace_iterator_flags is an enumeration that defines bit
401 * positions into trace_flags that controls the output. 561 * positions into trace_flags that controls the output.
@@ -415,8 +575,93 @@ enum trace_iterator_flags {
415 TRACE_ITER_STACKTRACE = 0x100, 575 TRACE_ITER_STACKTRACE = 0x100,
416 TRACE_ITER_SCHED_TREE = 0x200, 576 TRACE_ITER_SCHED_TREE = 0x200,
417 TRACE_ITER_PRINTK = 0x400, 577 TRACE_ITER_PRINTK = 0x400,
578 TRACE_ITER_PREEMPTONLY = 0x800,
579 TRACE_ITER_BRANCH = 0x1000,
580 TRACE_ITER_ANNOTATE = 0x2000,
581 TRACE_ITER_USERSTACKTRACE = 0x4000,
582 TRACE_ITER_SYM_USEROBJ = 0x8000,
583 TRACE_ITER_PRINTK_MSGONLY = 0x10000
418}; 584};
419 585
586/*
587 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
588 * control the output of kernel symbols.
589 */
590#define TRACE_ITER_SYM_MASK \
591 (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
592
420extern struct tracer nop_trace; 593extern struct tracer nop_trace;
421 594
595/**
596 * ftrace_preempt_disable - disable preemption scheduler safe
597 *
598 * When tracing can happen inside the scheduler, there exists
599 * cases that the tracing might happen before the need_resched
600 * flag is checked. If this happens and the tracer calls
601 * preempt_enable (after a disable), a schedule might take place
602 * causing an infinite recursion.
603 *
604 * To prevent this, we read the need_recshed flag before
605 * disabling preemption. When we want to enable preemption we
606 * check the flag, if it is set, then we call preempt_enable_no_resched.
607 * Otherwise, we call preempt_enable.
608 *
609 * The rational for doing the above is that if need resched is set
610 * and we have yet to reschedule, we are either in an atomic location
611 * (where we do not need to check for scheduling) or we are inside
612 * the scheduler and do not want to resched.
613 */
614static inline int ftrace_preempt_disable(void)
615{
616 int resched;
617
618 resched = need_resched();
619 preempt_disable_notrace();
620
621 return resched;
622}
623
624/**
625 * ftrace_preempt_enable - enable preemption scheduler safe
626 * @resched: the return value from ftrace_preempt_disable
627 *
628 * This is a scheduler safe way to enable preemption and not miss
629 * any preemption checks. The disabled saved the state of preemption.
630 * If resched is set, then we were either inside an atomic or
631 * are inside the scheduler (we would have already scheduled
632 * otherwise). In this case, we do not want to call normal
633 * preempt_enable, but preempt_enable_no_resched instead.
634 */
635static inline void ftrace_preempt_enable(int resched)
636{
637 if (resched)
638 preempt_enable_no_resched_notrace();
639 else
640 preempt_enable_notrace();
641}
642
643#ifdef CONFIG_BRANCH_TRACER
644extern int enable_branch_tracing(struct trace_array *tr);
645extern void disable_branch_tracing(void);
646static inline int trace_branch_enable(struct trace_array *tr)
647{
648 if (trace_flags & TRACE_ITER_BRANCH)
649 return enable_branch_tracing(tr);
650 return 0;
651}
652static inline void trace_branch_disable(void)
653{
654 /* due to races, always disable */
655 disable_branch_tracing();
656}
657#else
658static inline int trace_branch_enable(struct trace_array *tr)
659{
660 return 0;
661}
662static inline void trace_branch_disable(void)
663{
664}
665#endif /* CONFIG_BRANCH_TRACER */
666
422#endif /* _LINUX_KERNEL_TRACE_H */ 667#endif /* _LINUX_KERNEL_TRACE_H */
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index d0a5e50eeff2..3ccebde28482 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -13,101 +13,161 @@
13#include "trace.h" 13#include "trace.h"
14 14
15static struct trace_array *boot_trace; 15static struct trace_array *boot_trace;
16static int trace_boot_enabled; 16static bool pre_initcalls_finished;
17 17
18 18/* Tells the boot tracer that the pre_smp_initcalls are finished.
 20/* Should be started after do_pre_smp_initcalls() in init/main.c */ 19 * So we are ready.
20 * It doesn't enable sched events tracing however.
21 * You have to call enable_boot_trace to do so.
22 */
20void start_boot_trace(void) 23void start_boot_trace(void)
21{ 24{
22 trace_boot_enabled = 1; 25 pre_initcalls_finished = true;
23} 26}
24 27
25void stop_boot_trace(void) 28void enable_boot_trace(void)
26{ 29{
27 trace_boot_enabled = 0; 30 if (pre_initcalls_finished)
31 tracing_start_sched_switch_record();
28} 32}
29 33
30void reset_boot_trace(struct trace_array *tr) 34void disable_boot_trace(void)
31{ 35{
32 stop_boot_trace(); 36 if (pre_initcalls_finished)
37 tracing_stop_sched_switch_record();
33} 38}
34 39
35static void boot_trace_init(struct trace_array *tr) 40static int boot_trace_init(struct trace_array *tr)
36{ 41{
37 int cpu; 42 int cpu;
38 boot_trace = tr; 43 boot_trace = tr;
39 44
40 trace_boot_enabled = 0;
41
42 for_each_cpu_mask(cpu, cpu_possible_map) 45 for_each_cpu_mask(cpu, cpu_possible_map)
43 tracing_reset(tr, cpu); 46 tracing_reset(tr, cpu);
47
48 tracing_sched_switch_assign_trace(tr);
49 return 0;
44} 50}
45 51
46static void boot_trace_ctrl_update(struct trace_array *tr) 52static enum print_line_t
53initcall_call_print_line(struct trace_iterator *iter)
47{ 54{
48 if (tr->ctrl) 55 struct trace_entry *entry = iter->ent;
49 start_boot_trace(); 56 struct trace_seq *s = &iter->seq;
57 struct trace_boot_call *field;
58 struct boot_trace_call *call;
59 u64 ts;
60 unsigned long nsec_rem;
61 int ret;
62
63 trace_assign_type(field, entry);
64 call = &field->boot_call;
65 ts = iter->ts;
66 nsec_rem = do_div(ts, 1000000000);
67
68 ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n",
69 (unsigned long)ts, nsec_rem, call->func, call->caller);
70
71 if (!ret)
72 return TRACE_TYPE_PARTIAL_LINE;
50 else 73 else
51 stop_boot_trace(); 74 return TRACE_TYPE_HANDLED;
52} 75}
53 76
54static enum print_line_t initcall_print_line(struct trace_iterator *iter) 77static enum print_line_t
78initcall_ret_print_line(struct trace_iterator *iter)
55{ 79{
56 int ret;
57 struct trace_entry *entry = iter->ent; 80 struct trace_entry *entry = iter->ent;
58 struct trace_boot *field = (struct trace_boot *)entry;
59 struct boot_trace *it = &field->initcall;
60 struct trace_seq *s = &iter->seq; 81 struct trace_seq *s = &iter->seq;
61 struct timespec calltime = ktime_to_timespec(it->calltime); 82 struct trace_boot_ret *field;
62 struct timespec rettime = ktime_to_timespec(it->rettime); 83 struct boot_trace_ret *init_ret;
63 84 u64 ts;
64 if (entry->type == TRACE_BOOT) { 85 unsigned long nsec_rem;
65 ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n", 86 int ret;
66 calltime.tv_sec, 87
67 calltime.tv_nsec, 88 trace_assign_type(field, entry);
68 it->func, it->caller); 89 init_ret = &field->boot_ret;
69 if (!ret) 90 ts = iter->ts;
70 return TRACE_TYPE_PARTIAL_LINE; 91 nsec_rem = do_div(ts, 1000000000);
71 92
72 ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s " 93 ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s "
73 "returned %d after %lld msecs\n", 94 "returned %d after %llu msecs\n",
74 rettime.tv_sec, 95 (unsigned long) ts,
75 rettime.tv_nsec, 96 nsec_rem,
76 it->func, it->result, it->duration); 97 init_ret->func, init_ret->result, init_ret->duration);
77 98
78 if (!ret) 99 if (!ret)
79 return TRACE_TYPE_PARTIAL_LINE; 100 return TRACE_TYPE_PARTIAL_LINE;
101 else
80 return TRACE_TYPE_HANDLED; 102 return TRACE_TYPE_HANDLED;
103}
104
105static enum print_line_t initcall_print_line(struct trace_iterator *iter)
106{
107 struct trace_entry *entry = iter->ent;
108
109 switch (entry->type) {
110 case TRACE_BOOT_CALL:
111 return initcall_call_print_line(iter);
112 case TRACE_BOOT_RET:
113 return initcall_ret_print_line(iter);
114 default:
115 return TRACE_TYPE_UNHANDLED;
81 } 116 }
82 return TRACE_TYPE_UNHANDLED;
83} 117}
84 118
85struct tracer boot_tracer __read_mostly = 119struct tracer boot_tracer __read_mostly =
86{ 120{
87 .name = "initcall", 121 .name = "initcall",
88 .init = boot_trace_init, 122 .init = boot_trace_init,
89 .reset = reset_boot_trace, 123 .reset = tracing_reset_online_cpus,
90 .ctrl_update = boot_trace_ctrl_update,
91 .print_line = initcall_print_line, 124 .print_line = initcall_print_line,
92}; 125};
93 126
94void trace_boot(struct boot_trace *it, initcall_t fn) 127void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
95{ 128{
96 struct ring_buffer_event *event; 129 struct ring_buffer_event *event;
97 struct trace_boot *entry; 130 struct trace_boot_call *entry;
98 struct trace_array_cpu *data;
99 unsigned long irq_flags; 131 unsigned long irq_flags;
100 struct trace_array *tr = boot_trace; 132 struct trace_array *tr = boot_trace;
101 133
102 if (!trace_boot_enabled) 134 if (!pre_initcalls_finished)
103 return; 135 return;
104 136
105 /* Get its name now since this function could 137 /* Get its name now since this function could
106 * disappear because it is in the .init section. 138 * disappear because it is in the .init section.
107 */ 139 */
108 sprint_symbol(it->func, (unsigned long)fn); 140 sprint_symbol(bt->func, (unsigned long)fn);
141 preempt_disable();
142
143 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
144 &irq_flags);
145 if (!event)
146 goto out;
147 entry = ring_buffer_event_data(event);
148 tracing_generic_entry_update(&entry->ent, 0, 0);
149 entry->ent.type = TRACE_BOOT_CALL;
150 entry->boot_call = *bt;
151 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
152
153 trace_wake_up();
154
155 out:
156 preempt_enable();
157}
158
159void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
160{
161 struct ring_buffer_event *event;
162 struct trace_boot_ret *entry;
163 unsigned long irq_flags;
164 struct trace_array *tr = boot_trace;
165
166 if (!pre_initcalls_finished)
167 return;
168
169 sprint_symbol(bt->func, (unsigned long)fn);
109 preempt_disable(); 170 preempt_disable();
110 data = tr->data[smp_processor_id()];
111 171
112 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), 172 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
113 &irq_flags); 173 &irq_flags);
@@ -115,8 +175,8 @@ void trace_boot(struct boot_trace *it, initcall_t fn)
115 goto out; 175 goto out;
116 entry = ring_buffer_event_data(event); 176 entry = ring_buffer_event_data(event);
117 tracing_generic_entry_update(&entry->ent, 0, 0); 177 tracing_generic_entry_update(&entry->ent, 0, 0);
118 entry->ent.type = TRACE_BOOT; 178 entry->ent.type = TRACE_BOOT_RET;
119 entry->initcall = *it; 179 entry->boot_ret = *bt;
120 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 180 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
121 181
122 trace_wake_up(); 182 trace_wake_up();
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
new file mode 100644
index 000000000000..6c00feb3bac7
--- /dev/null
+++ b/kernel/trace/trace_branch.c
@@ -0,0 +1,342 @@
1/*
2 * unlikely profiler
3 *
4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5 */
6#include <linux/kallsyms.h>
7#include <linux/seq_file.h>
8#include <linux/spinlock.h>
9#include <linux/irqflags.h>
10#include <linux/debugfs.h>
11#include <linux/uaccess.h>
12#include <linux/module.h>
13#include <linux/ftrace.h>
14#include <linux/hash.h>
15#include <linux/fs.h>
16#include <asm/local.h>
17#include "trace.h"
18
19#ifdef CONFIG_BRANCH_TRACER
20
21static int branch_tracing_enabled __read_mostly;
22static DEFINE_MUTEX(branch_tracing_mutex);
23static struct trace_array *branch_tracer;
24
25static void
26probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
27{
28 struct trace_array *tr = branch_tracer;
29 struct ring_buffer_event *event;
30 struct trace_branch *entry;
31 unsigned long flags, irq_flags;
32 int cpu, pc;
33 const char *p;
34
35 /*
36 * I would love to save just the ftrace_likely_data pointer, but
37 * this code can also be used by modules. Ugly things can happen
38 * if the module is unloaded, and then we go and read the
39 * pointer. This is slower, but much safer.
40 */
41
42 if (unlikely(!tr))
43 return;
44
45 local_irq_save(flags);
46 cpu = raw_smp_processor_id();
47 if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
48 goto out;
49
50 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
51 &irq_flags);
52 if (!event)
53 goto out;
54
55 pc = preempt_count();
56 entry = ring_buffer_event_data(event);
57 tracing_generic_entry_update(&entry->ent, flags, pc);
58 entry->ent.type = TRACE_BRANCH;
59
60 /* Strip off the path, only save the file */
61 p = f->file + strlen(f->file);
62 while (p >= f->file && *p != '/')
63 p--;
64 p++;
65
66 strncpy(entry->func, f->func, TRACE_FUNC_SIZE);
67 strncpy(entry->file, p, TRACE_FILE_SIZE);
68 entry->func[TRACE_FUNC_SIZE] = 0;
69 entry->file[TRACE_FILE_SIZE] = 0;
70 entry->line = f->line;
71 entry->correct = val == expect;
72
73 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
74
75 out:
76 atomic_dec(&tr->data[cpu]->disabled);
77 local_irq_restore(flags);
78}
79
80static inline
81void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
82{
83 if (!branch_tracing_enabled)
84 return;
85
86 probe_likely_condition(f, val, expect);
87}
88
89int enable_branch_tracing(struct trace_array *tr)
90{
91 int ret = 0;
92
93 mutex_lock(&branch_tracing_mutex);
94 branch_tracer = tr;
95 /*
96 * Must be seen before enabling. The reader is a condition
97 * where we do not need a matching rmb()
98 */
99 smp_wmb();
100 branch_tracing_enabled++;
101 mutex_unlock(&branch_tracing_mutex);
102
103 return ret;
104}
105
106void disable_branch_tracing(void)
107{
108 mutex_lock(&branch_tracing_mutex);
109
110 if (!branch_tracing_enabled)
111 goto out_unlock;
112
113 branch_tracing_enabled--;
114
115 out_unlock:
116 mutex_unlock(&branch_tracing_mutex);
117}
118
119static void start_branch_trace(struct trace_array *tr)
120{
121 enable_branch_tracing(tr);
122}
123
124static void stop_branch_trace(struct trace_array *tr)
125{
126 disable_branch_tracing();
127}
128
129static int branch_trace_init(struct trace_array *tr)
130{
131 int cpu;
132
133 for_each_online_cpu(cpu)
134 tracing_reset(tr, cpu);
135
136 start_branch_trace(tr);
137 return 0;
138}
139
140static void branch_trace_reset(struct trace_array *tr)
141{
142 stop_branch_trace(tr);
143}
144
145struct tracer branch_trace __read_mostly =
146{
147 .name = "branch",
148 .init = branch_trace_init,
149 .reset = branch_trace_reset,
150#ifdef CONFIG_FTRACE_SELFTEST
151 .selftest = trace_selftest_startup_branch,
152#endif
153};
154
155__init static int init_branch_trace(void)
156{
157 return register_tracer(&branch_trace);
158}
159
160device_initcall(init_branch_trace);
161#else
162static inline
163void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
164{
165}
166#endif /* CONFIG_BRANCH_TRACER */
167
168void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
169{
170 /*
171 * I would love to have a trace point here instead, but the
172 * trace point code is so inundated with unlikely and likely
173 * conditions that the recursive nightmare that exists is too
174 * much to try to get working. At least for now.
175 */
176 trace_likely_condition(f, val, expect);
177
178 /* FIXME: Make this atomic! */
179 if (val == expect)
180 f->correct++;
181 else
182 f->incorrect++;
183}
184EXPORT_SYMBOL(ftrace_likely_update);
185
186struct ftrace_pointer {
187 void *start;
188 void *stop;
189 int hit;
190};
191
192static void *
193t_next(struct seq_file *m, void *v, loff_t *pos)
194{
195 const struct ftrace_pointer *f = m->private;
196 struct ftrace_branch_data *p = v;
197
198 (*pos)++;
199
200 if (v == (void *)1)
201 return f->start;
202
203 ++p;
204
205 if ((void *)p >= (void *)f->stop)
206 return NULL;
207
208 return p;
209}
210
211static void *t_start(struct seq_file *m, loff_t *pos)
212{
213 void *t = (void *)1;
214 loff_t l = 0;
215
216 for (; t && l < *pos; t = t_next(m, t, &l))
217 ;
218
219 return t;
220}
221
222static void t_stop(struct seq_file *m, void *p)
223{
224}
225
226static int t_show(struct seq_file *m, void *v)
227{
228 const struct ftrace_pointer *fp = m->private;
229 struct ftrace_branch_data *p = v;
230 const char *f;
231 long percent;
232
233 if (v == (void *)1) {
234 if (fp->hit)
235 seq_printf(m, " miss hit %% ");
236 else
237 seq_printf(m, " correct incorrect %% ");
238 seq_printf(m, " Function "
239 " File Line\n"
240 " ------- --------- - "
241 " -------- "
242 " ---- ----\n");
243 return 0;
244 }
245
246 /* Only print the file, not the path */
247 f = p->file + strlen(p->file);
248 while (f >= p->file && *f != '/')
249 f--;
250 f++;
251
252 /*
 253 * The miss is overlaid on correct, and hit on incorrect.
254 */
255 if (p->correct) {
256 percent = p->incorrect * 100;
257 percent /= p->correct + p->incorrect;
258 } else
259 percent = p->incorrect ? 100 : -1;
260
261 seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect);
262 if (percent < 0)
263 seq_printf(m, " X ");
264 else
265 seq_printf(m, "%3ld ", percent);
266 seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
267 return 0;
268}
269
270static struct seq_operations tracing_likely_seq_ops = {
271 .start = t_start,
272 .next = t_next,
273 .stop = t_stop,
274 .show = t_show,
275};
276
277static int tracing_branch_open(struct inode *inode, struct file *file)
278{
279 int ret;
280
281 ret = seq_open(file, &tracing_likely_seq_ops);
282 if (!ret) {
283 struct seq_file *m = file->private_data;
284 m->private = (void *)inode->i_private;
285 }
286
287 return ret;
288}
289
290static const struct file_operations tracing_branch_fops = {
291 .open = tracing_branch_open,
292 .read = seq_read,
293 .llseek = seq_lseek,
294};
295
296#ifdef CONFIG_PROFILE_ALL_BRANCHES
297extern unsigned long __start_branch_profile[];
298extern unsigned long __stop_branch_profile[];
299
300static const struct ftrace_pointer ftrace_branch_pos = {
301 .start = __start_branch_profile,
302 .stop = __stop_branch_profile,
303 .hit = 1,
304};
305
306#endif /* CONFIG_PROFILE_ALL_BRANCHES */
307
308extern unsigned long __start_annotated_branch_profile[];
309extern unsigned long __stop_annotated_branch_profile[];
310
311static const struct ftrace_pointer ftrace_annotated_branch_pos = {
312 .start = __start_annotated_branch_profile,
313 .stop = __stop_annotated_branch_profile,
314};
315
316static __init int ftrace_branch_init(void)
317{
318 struct dentry *d_tracer;
319 struct dentry *entry;
320
321 d_tracer = tracing_init_dentry();
322
323 entry = debugfs_create_file("profile_annotated_branch", 0444, d_tracer,
324 (void *)&ftrace_annotated_branch_pos,
325 &tracing_branch_fops);
326 if (!entry)
327 pr_warning("Could not create debugfs "
328 "'profile_annotatet_branch' entry\n");
329
330#ifdef CONFIG_PROFILE_ALL_BRANCHES
331 entry = debugfs_create_file("profile_branch", 0444, d_tracer,
332 (void *)&ftrace_branch_pos,
333 &tracing_branch_fops);
334 if (!entry)
335 pr_warning("Could not create debugfs"
336 " 'profile_branch' entry\n");
337#endif
338
339 return 0;
340}
341
342device_initcall(ftrace_branch_init);
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 0f85a64003d3..9236d7e25a16 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -16,20 +16,10 @@
16 16
17#include "trace.h" 17#include "trace.h"
18 18
19static void function_reset(struct trace_array *tr)
20{
21 int cpu;
22
23 tr->time_start = ftrace_now(tr->cpu);
24
25 for_each_online_cpu(cpu)
26 tracing_reset(tr, cpu);
27}
28
29static void start_function_trace(struct trace_array *tr) 19static void start_function_trace(struct trace_array *tr)
30{ 20{
31 tr->cpu = get_cpu(); 21 tr->cpu = get_cpu();
32 function_reset(tr); 22 tracing_reset_online_cpus(tr);
33 put_cpu(); 23 put_cpu();
34 24
35 tracing_start_cmdline_record(); 25 tracing_start_cmdline_record();
@@ -42,24 +32,20 @@ static void stop_function_trace(struct trace_array *tr)
42 tracing_stop_cmdline_record(); 32 tracing_stop_cmdline_record();
43} 33}
44 34
45static void function_trace_init(struct trace_array *tr) 35static int function_trace_init(struct trace_array *tr)
46{ 36{
47 if (tr->ctrl) 37 start_function_trace(tr);
48 start_function_trace(tr); 38 return 0;
49} 39}
50 40
51static void function_trace_reset(struct trace_array *tr) 41static void function_trace_reset(struct trace_array *tr)
52{ 42{
53 if (tr->ctrl) 43 stop_function_trace(tr);
54 stop_function_trace(tr);
55} 44}
56 45
57static void function_trace_ctrl_update(struct trace_array *tr) 46static void function_trace_start(struct trace_array *tr)
58{ 47{
59 if (tr->ctrl) 48 tracing_reset_online_cpus(tr);
60 start_function_trace(tr);
61 else
62 stop_function_trace(tr);
63} 49}
64 50
65static struct tracer function_trace __read_mostly = 51static struct tracer function_trace __read_mostly =
@@ -67,7 +53,7 @@ static struct tracer function_trace __read_mostly =
67 .name = "function", 53 .name = "function",
68 .init = function_trace_init, 54 .init = function_trace_init,
69 .reset = function_trace_reset, 55 .reset = function_trace_reset,
70 .ctrl_update = function_trace_ctrl_update, 56 .start = function_trace_start,
71#ifdef CONFIG_FTRACE_SELFTEST 57#ifdef CONFIG_FTRACE_SELFTEST
72 .selftest = trace_selftest_startup_function, 58 .selftest = trace_selftest_startup_function,
73#endif 59#endif
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
new file mode 100644
index 000000000000..4bf39fcae97a
--- /dev/null
+++ b/kernel/trace/trace_functions_graph.c
@@ -0,0 +1,669 @@
1/*
2 *
3 * Function graph tracer.
4 * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
5 * Mostly borrowed from function tracer which
6 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
7 *
8 */
9#include <linux/debugfs.h>
10#include <linux/uaccess.h>
11#include <linux/ftrace.h>
12#include <linux/fs.h>
13
14#include "trace.h"
15
16#define TRACE_GRAPH_INDENT 2
17
18/* Flag options */
19#define TRACE_GRAPH_PRINT_OVERRUN 0x1
20#define TRACE_GRAPH_PRINT_CPU 0x2
21#define TRACE_GRAPH_PRINT_OVERHEAD 0x4
22#define TRACE_GRAPH_PRINT_PROC 0x8
23
24static struct tracer_opt trace_opts[] = {
25 /* Display overruns ? */
26 { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
27 /* Display CPU ? */
28 { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
29 /* Display Overhead ? */
30 { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
31 /* Display proc name/pid */
32 { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
33 { } /* Empty entry */
34};
35
36static struct tracer_flags tracer_flags = {
37 /* Don't display overruns and proc by default */
38 .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD,
39 .opts = trace_opts
40};
41
42/* pid on the last trace processed */
43static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
44
45static int graph_trace_init(struct trace_array *tr)
46{
47 int cpu, ret;
48
49 for_each_online_cpu(cpu)
50 tracing_reset(tr, cpu);
51
52 ret = register_ftrace_graph(&trace_graph_return,
53 &trace_graph_entry);
54 if (ret)
55 return ret;
56 tracing_start_cmdline_record();
57
58 return 0;
59}
60
61static void graph_trace_reset(struct trace_array *tr)
62{
63 tracing_stop_cmdline_record();
64 unregister_ftrace_graph();
65}
66
67static inline int log10_cpu(int nb)
68{
69 if (nb / 100)
70 return 3;
71 if (nb / 10)
72 return 2;
73 return 1;
74}
75
76static enum print_line_t
77print_graph_cpu(struct trace_seq *s, int cpu)
78{
79 int i;
80 int ret;
81 int log10_this = log10_cpu(cpu);
82 int log10_all = log10_cpu(cpus_weight_nr(cpu_online_map));
83
84
85 /*
86 * Start with a space character - to make it stand out
87 * to the right a bit when trace output is pasted into
88 * email:
89 */
90 ret = trace_seq_printf(s, " ");
91
92 /*
93 * Tricky - we space the CPU field according to the max
94 * number of online CPUs. On a 2-cpu system it would take
95 * a maximum of 1 digit - on a 128 cpu system it would
96 * take up to 3 digits:
97 */
98 for (i = 0; i < log10_all - log10_this; i++) {
99 ret = trace_seq_printf(s, " ");
100 if (!ret)
101 return TRACE_TYPE_PARTIAL_LINE;
102 }
103 ret = trace_seq_printf(s, "%d) ", cpu);
104 if (!ret)
105 return TRACE_TYPE_PARTIAL_LINE;
106
107 return TRACE_TYPE_HANDLED;
108}
109
110#define TRACE_GRAPH_PROCINFO_LENGTH 14
111
112static enum print_line_t
113print_graph_proc(struct trace_seq *s, pid_t pid)
114{
115 int i;
116 int ret;
117 int len;
118 char comm[8];
119 int spaces = 0;
120 /* sign + log10(MAX_INT) + '\0' */
121 char pid_str[11];
122
123 strncpy(comm, trace_find_cmdline(pid), 7);
124 comm[7] = '\0';
125 sprintf(pid_str, "%d", pid);
126
127 /* 1 stands for the "-" character */
128 len = strlen(comm) + strlen(pid_str) + 1;
129
130 if (len < TRACE_GRAPH_PROCINFO_LENGTH)
131 spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
132
133 /* First spaces to align center */
134 for (i = 0; i < spaces / 2; i++) {
135 ret = trace_seq_printf(s, " ");
136 if (!ret)
137 return TRACE_TYPE_PARTIAL_LINE;
138 }
139
140 ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
141 if (!ret)
142 return TRACE_TYPE_PARTIAL_LINE;
143
144 /* Last spaces to align center */
145 for (i = 0; i < spaces - (spaces / 2); i++) {
146 ret = trace_seq_printf(s, " ");
147 if (!ret)
148 return TRACE_TYPE_PARTIAL_LINE;
149 }
150 return TRACE_TYPE_HANDLED;
151}
152
153
154/* If the pid changed since the last trace, output this event */
155static enum print_line_t
156verif_pid(struct trace_seq *s, pid_t pid, int cpu)
157{
158 pid_t prev_pid;
159 int ret;
160
161 if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
162 return TRACE_TYPE_HANDLED;
163
164 prev_pid = last_pid[cpu];
165 last_pid[cpu] = pid;
166
167/*
168 * Context-switch trace line:
169
170 ------------------------------------------
171 | 1) migration/0--1 => sshd-1755
172 ------------------------------------------
173
174 */
175 ret = trace_seq_printf(s,
176 " ------------------------------------------\n");
177 if (!ret)
178 TRACE_TYPE_PARTIAL_LINE;
179
180 ret = print_graph_cpu(s, cpu);
181 if (ret == TRACE_TYPE_PARTIAL_LINE)
182 TRACE_TYPE_PARTIAL_LINE;
183
184 ret = print_graph_proc(s, prev_pid);
185 if (ret == TRACE_TYPE_PARTIAL_LINE)
186 TRACE_TYPE_PARTIAL_LINE;
187
188 ret = trace_seq_printf(s, " => ");
189 if (!ret)
190 TRACE_TYPE_PARTIAL_LINE;
191
192 ret = print_graph_proc(s, pid);
193 if (ret == TRACE_TYPE_PARTIAL_LINE)
194 TRACE_TYPE_PARTIAL_LINE;
195
196 ret = trace_seq_printf(s,
197 "\n ------------------------------------------\n\n");
198 if (!ret)
199 TRACE_TYPE_PARTIAL_LINE;
200
201 return ret;
202}
203
204static bool
205trace_branch_is_leaf(struct trace_iterator *iter,
206 struct ftrace_graph_ent_entry *curr)
207{
208 struct ring_buffer_iter *ring_iter;
209 struct ring_buffer_event *event;
210 struct ftrace_graph_ret_entry *next;
211
212 ring_iter = iter->buffer_iter[iter->cpu];
213
214 if (!ring_iter)
215 return false;
216
217 event = ring_buffer_iter_peek(ring_iter, NULL);
218
219 if (!event)
220 return false;
221
222 next = ring_buffer_event_data(event);
223
224 if (next->ent.type != TRACE_GRAPH_RET)
225 return false;
226
227 if (curr->ent.pid != next->ent.pid ||
228 curr->graph_ent.func != next->ret.func)
229 return false;
230
231 return true;
232}
233
234static enum print_line_t
235print_graph_irq(struct trace_seq *s, unsigned long addr,
236 enum trace_type type, int cpu, pid_t pid)
237{
238 int ret;
239
240 if (addr < (unsigned long)__irqentry_text_start ||
241 addr >= (unsigned long)__irqentry_text_end)
242 return TRACE_TYPE_UNHANDLED;
243
244 if (type == TRACE_GRAPH_ENT) {
245 ret = trace_seq_printf(s, "==========> | ");
246 } else {
247 /* Cpu */
248 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
249 ret = print_graph_cpu(s, cpu);
250 if (ret == TRACE_TYPE_PARTIAL_LINE)
251 return TRACE_TYPE_PARTIAL_LINE;
252 }
253 /* Proc */
254 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
255 ret = print_graph_proc(s, pid);
256 if (ret == TRACE_TYPE_PARTIAL_LINE)
257 return TRACE_TYPE_PARTIAL_LINE;
258
259 ret = trace_seq_printf(s, " | ");
260 if (!ret)
261 return TRACE_TYPE_PARTIAL_LINE;
262 }
263
264 /* No overhead */
265 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
266 ret = trace_seq_printf(s, " ");
267 if (!ret)
268 return TRACE_TYPE_PARTIAL_LINE;
269 }
270
271 ret = trace_seq_printf(s, "<========== |\n");
272 }
273 if (!ret)
274 return TRACE_TYPE_PARTIAL_LINE;
275 return TRACE_TYPE_HANDLED;
276}
277
278static enum print_line_t
279print_graph_duration(unsigned long long duration, struct trace_seq *s)
280{
281 unsigned long nsecs_rem = do_div(duration, 1000);
282 /* log10(ULONG_MAX) + '\0' */
283 char msecs_str[21];
284 char nsecs_str[5];
285 int ret, len;
286 int i;
287
288 sprintf(msecs_str, "%lu", (unsigned long) duration);
289
290 /* Print msecs */
291 ret = trace_seq_printf(s, msecs_str);
292 if (!ret)
293 return TRACE_TYPE_PARTIAL_LINE;
294
295 len = strlen(msecs_str);
296
297 /* Print nsecs (we don't want to exceed 7 numbers) */
298 if (len < 7) {
299 snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
300 ret = trace_seq_printf(s, ".%s", nsecs_str);
301 if (!ret)
302 return TRACE_TYPE_PARTIAL_LINE;
303 len += strlen(nsecs_str);
304 }
305
306 ret = trace_seq_printf(s, " us ");
307 if (!ret)
308 return TRACE_TYPE_PARTIAL_LINE;
309
310 /* Print remaining spaces to fit the row's width */
311 for (i = len; i < 7; i++) {
312 ret = trace_seq_printf(s, " ");
313 if (!ret)
314 return TRACE_TYPE_PARTIAL_LINE;
315 }
316
317 ret = trace_seq_printf(s, "| ");
318 if (!ret)
319 return TRACE_TYPE_PARTIAL_LINE;
320 return TRACE_TYPE_HANDLED;
321
322}
323
 324/* Signal an overhead of execution time to the output */
325static int
326print_graph_overhead(unsigned long long duration, struct trace_seq *s)
327{
328 /* Duration exceeded 100 msecs */
329 if (duration > 100000ULL)
330 return trace_seq_printf(s, "! ");
331
332 /* Duration exceeded 10 msecs */
333 if (duration > 10000ULL)
334 return trace_seq_printf(s, "+ ");
335
336 return trace_seq_printf(s, " ");
337}
338
339/* Case of a leaf function on its call entry */
340static enum print_line_t
341print_graph_entry_leaf(struct trace_iterator *iter,
342 struct ftrace_graph_ent_entry *entry, struct trace_seq *s)
343{
344 struct ftrace_graph_ret_entry *ret_entry;
345 struct ftrace_graph_ret *graph_ret;
346 struct ring_buffer_event *event;
347 struct ftrace_graph_ent *call;
348 unsigned long long duration;
349 int ret;
350 int i;
351
352 event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
353 ret_entry = ring_buffer_event_data(event);
354 graph_ret = &ret_entry->ret;
355 call = &entry->graph_ent;
356 duration = graph_ret->rettime - graph_ret->calltime;
357
358 /* Overhead */
359 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
360 ret = print_graph_overhead(duration, s);
361 if (!ret)
362 return TRACE_TYPE_PARTIAL_LINE;
363 }
364
365 /* Duration */
366 ret = print_graph_duration(duration, s);
367 if (ret == TRACE_TYPE_PARTIAL_LINE)
368 return TRACE_TYPE_PARTIAL_LINE;
369
370 /* Function */
371 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
372 ret = trace_seq_printf(s, " ");
373 if (!ret)
374 return TRACE_TYPE_PARTIAL_LINE;
375 }
376
377 ret = seq_print_ip_sym(s, call->func, 0);
378 if (!ret)
379 return TRACE_TYPE_PARTIAL_LINE;
380
381 ret = trace_seq_printf(s, "();\n");
382 if (!ret)
383 return TRACE_TYPE_PARTIAL_LINE;
384
385 return TRACE_TYPE_HANDLED;
386}
387
388static enum print_line_t
389print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
390 struct trace_seq *s, pid_t pid, int cpu)
391{
392 int i;
393 int ret;
394 struct ftrace_graph_ent *call = &entry->graph_ent;
395
396 /* No overhead */
397 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
398 ret = trace_seq_printf(s, " ");
399 if (!ret)
400 return TRACE_TYPE_PARTIAL_LINE;
401 }
402
403 /* Interrupt */
404 ret = print_graph_irq(s, call->func, TRACE_GRAPH_ENT, cpu, pid);
405 if (ret == TRACE_TYPE_UNHANDLED) {
406 /* No time */
407 ret = trace_seq_printf(s, " | ");
408 if (!ret)
409 return TRACE_TYPE_PARTIAL_LINE;
410 } else {
411 if (ret == TRACE_TYPE_PARTIAL_LINE)
412 return TRACE_TYPE_PARTIAL_LINE;
413 }
414
415
416 /* Function */
417 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
418 ret = trace_seq_printf(s, " ");
419 if (!ret)
420 return TRACE_TYPE_PARTIAL_LINE;
421 }
422
423 ret = seq_print_ip_sym(s, call->func, 0);
424 if (!ret)
425 return TRACE_TYPE_PARTIAL_LINE;
426
427 ret = trace_seq_printf(s, "() {\n");
428 if (!ret)
429 return TRACE_TYPE_PARTIAL_LINE;
430
431 return TRACE_TYPE_HANDLED;
432}
433
434static enum print_line_t
435print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
436 struct trace_iterator *iter, int cpu)
437{
438 int ret;
439 struct trace_entry *ent = iter->ent;
440
441 /* Pid */
442 if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE)
443 return TRACE_TYPE_PARTIAL_LINE;
444
445 /* Cpu */
446 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
447 ret = print_graph_cpu(s, cpu);
448 if (ret == TRACE_TYPE_PARTIAL_LINE)
449 return TRACE_TYPE_PARTIAL_LINE;
450 }
451
452 /* Proc */
453 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
454 ret = print_graph_proc(s, ent->pid);
455 if (ret == TRACE_TYPE_PARTIAL_LINE)
456 return TRACE_TYPE_PARTIAL_LINE;
457
458 ret = trace_seq_printf(s, " | ");
459 if (!ret)
460 return TRACE_TYPE_PARTIAL_LINE;
461 }
462
463 if (trace_branch_is_leaf(iter, field))
464 return print_graph_entry_leaf(iter, field, s);
465 else
466 return print_graph_entry_nested(field, s, iter->ent->pid, cpu);
467
468}
469
470static enum print_line_t
471print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
472 struct trace_entry *ent, int cpu)
473{
474 int i;
475 int ret;
476 unsigned long long duration = trace->rettime - trace->calltime;
477
478 /* Pid */
479 if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE)
480 return TRACE_TYPE_PARTIAL_LINE;
481
482 /* Cpu */
483 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
484 ret = print_graph_cpu(s, cpu);
485 if (ret == TRACE_TYPE_PARTIAL_LINE)
486 return TRACE_TYPE_PARTIAL_LINE;
487 }
488
489 /* Proc */
490 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
491 ret = print_graph_proc(s, ent->pid);
492 if (ret == TRACE_TYPE_PARTIAL_LINE)
493 return TRACE_TYPE_PARTIAL_LINE;
494
495 ret = trace_seq_printf(s, " | ");
496 if (!ret)
497 return TRACE_TYPE_PARTIAL_LINE;
498 }
499
500 /* Overhead */
501 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
502 ret = print_graph_overhead(duration, s);
503 if (!ret)
504 return TRACE_TYPE_PARTIAL_LINE;
505 }
506
507 /* Duration */
508 ret = print_graph_duration(duration, s);
509 if (ret == TRACE_TYPE_PARTIAL_LINE)
510 return TRACE_TYPE_PARTIAL_LINE;
511
512 /* Closing brace */
513 for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
514 ret = trace_seq_printf(s, " ");
515 if (!ret)
516 return TRACE_TYPE_PARTIAL_LINE;
517 }
518
519 ret = trace_seq_printf(s, "}\n");
520 if (!ret)
521 return TRACE_TYPE_PARTIAL_LINE;
522
523 /* Overrun */
524 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
525 ret = trace_seq_printf(s, " (Overruns: %lu)\n",
526 trace->overrun);
527 if (!ret)
528 return TRACE_TYPE_PARTIAL_LINE;
529 }
530
531 ret = print_graph_irq(s, trace->func, TRACE_GRAPH_RET, cpu, ent->pid);
532 if (ret == TRACE_TYPE_PARTIAL_LINE)
533 return TRACE_TYPE_PARTIAL_LINE;
534
535 return TRACE_TYPE_HANDLED;
536}
537
538static enum print_line_t
539print_graph_comment(struct print_entry *trace, struct trace_seq *s,
540 struct trace_entry *ent, struct trace_iterator *iter)
541{
542 int i;
543 int ret;
544
545 /* Pid */
546 if (verif_pid(s, ent->pid, iter->cpu) == TRACE_TYPE_PARTIAL_LINE)
547 return TRACE_TYPE_PARTIAL_LINE;
548
549 /* Cpu */
550 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
551 ret = print_graph_cpu(s, iter->cpu);
552 if (ret == TRACE_TYPE_PARTIAL_LINE)
553 return TRACE_TYPE_PARTIAL_LINE;
554 }
555
556 /* Proc */
557 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
558 ret = print_graph_proc(s, ent->pid);
559 if (ret == TRACE_TYPE_PARTIAL_LINE)
560 return TRACE_TYPE_PARTIAL_LINE;
561
562 ret = trace_seq_printf(s, " | ");
563 if (!ret)
564 return TRACE_TYPE_PARTIAL_LINE;
565 }
566
567 /* No overhead */
568 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
569 ret = trace_seq_printf(s, " ");
570 if (!ret)
571 return TRACE_TYPE_PARTIAL_LINE;
572 }
573
574 /* No time */
575 ret = trace_seq_printf(s, " | ");
576 if (!ret)
577 return TRACE_TYPE_PARTIAL_LINE;
578
579 /* Indentation */
580 if (trace->depth > 0)
581 for (i = 0; i < (trace->depth + 1) * TRACE_GRAPH_INDENT; i++) {
582 ret = trace_seq_printf(s, " ");
583 if (!ret)
584 return TRACE_TYPE_PARTIAL_LINE;
585 }
586
587 /* The comment */
588 ret = trace_seq_printf(s, "/* %s", trace->buf);
589 if (!ret)
590 return TRACE_TYPE_PARTIAL_LINE;
591
592 if (ent->flags & TRACE_FLAG_CONT)
593 trace_seq_print_cont(s, iter);
594
595 ret = trace_seq_printf(s, " */\n");
596 if (!ret)
597 return TRACE_TYPE_PARTIAL_LINE;
598
599 return TRACE_TYPE_HANDLED;
600}
601
602
603enum print_line_t
604print_graph_function(struct trace_iterator *iter)
605{
606 struct trace_seq *s = &iter->seq;
607 struct trace_entry *entry = iter->ent;
608
609 switch (entry->type) {
610 case TRACE_GRAPH_ENT: {
611 struct ftrace_graph_ent_entry *field;
612 trace_assign_type(field, entry);
613 return print_graph_entry(field, s, iter,
614 iter->cpu);
615 }
616 case TRACE_GRAPH_RET: {
617 struct ftrace_graph_ret_entry *field;
618 trace_assign_type(field, entry);
619 return print_graph_return(&field->ret, s, entry, iter->cpu);
620 }
621 case TRACE_PRINT: {
622 struct print_entry *field;
623 trace_assign_type(field, entry);
624 return print_graph_comment(field, s, entry, iter);
625 }
626 default:
627 return TRACE_TYPE_UNHANDLED;
628 }
629}
630
631static void print_graph_headers(struct seq_file *s)
632{
633 /* 1st line */
634 seq_printf(s, "# ");
635 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
636 seq_printf(s, "CPU ");
637 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
638 seq_printf(s, "TASK/PID ");
639 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD)
640 seq_printf(s, "OVERHEAD/");
641 seq_printf(s, "DURATION FUNCTION CALLS\n");
642
643 /* 2nd line */
644 seq_printf(s, "# ");
645 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
646 seq_printf(s, "| ");
647 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
648 seq_printf(s, "| | ");
649 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
650 seq_printf(s, "| ");
651 seq_printf(s, "| | | | |\n");
652 } else
653 seq_printf(s, " | | | | |\n");
654}
655static struct tracer graph_trace __read_mostly = {
656 .name = "function_graph",
657 .init = graph_trace_init,
658 .reset = graph_trace_reset,
659 .print_line = print_graph_function,
660 .print_header = print_graph_headers,
661 .flags = &tracer_flags,
662};
663
664static __init int init_graph_trace(void)
665{
666 return register_tracer(&graph_trace);
667}
668
669device_initcall(init_graph_trace);
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
new file mode 100644
index 000000000000..b6a3e20a49a9
--- /dev/null
+++ b/kernel/trace/trace_hw_branches.c
@@ -0,0 +1,195 @@
1/*
2 * h/w branch tracer for x86 based on bts
3 *
4 * Copyright (C) 2008 Markus Metzger <markus.t.metzger@gmail.com>
5 *
6 */
7
8#include <linux/module.h>
9#include <linux/fs.h>
10#include <linux/debugfs.h>
11#include <linux/ftrace.h>
12#include <linux/kallsyms.h>
13
14#include <asm/ds.h>
15
16#include "trace.h"
17
18
19#define SIZEOF_BTS (1 << 13)
20
21static DEFINE_PER_CPU(struct bts_tracer *, tracer);
22static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer);
23
24#define this_tracer per_cpu(tracer, smp_processor_id())
25#define this_buffer per_cpu(buffer, smp_processor_id())
26
27
28static void bts_trace_start_cpu(void *arg)
29{
30 if (this_tracer)
31 ds_release_bts(this_tracer);
32
33 this_tracer =
34 ds_request_bts(/* task = */ NULL, this_buffer, SIZEOF_BTS,
35 /* ovfl = */ NULL, /* th = */ (size_t)-1,
36 BTS_KERNEL);
37 if (IS_ERR(this_tracer)) {
38 this_tracer = NULL;
39 return;
40 }
41}
42
43static void bts_trace_start(struct trace_array *tr)
44{
45 int cpu;
46
47 tracing_reset_online_cpus(tr);
48
49 for_each_cpu_mask(cpu, cpu_possible_map)
50 smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
51}
52
53static void bts_trace_stop_cpu(void *arg)
54{
55 if (this_tracer) {
56 ds_release_bts(this_tracer);
57 this_tracer = NULL;
58 }
59}
60
61static void bts_trace_stop(struct trace_array *tr)
62{
63 int cpu;
64
65 for_each_cpu_mask(cpu, cpu_possible_map)
66 smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
67}
68
69static int bts_trace_init(struct trace_array *tr)
70{
71 tracing_reset_online_cpus(tr);
72 bts_trace_start(tr);
73
74 return 0;
75}
76
77static void bts_trace_print_header(struct seq_file *m)
78{
79 seq_puts(m,
80 "# CPU# FROM TO FUNCTION\n");
81 seq_puts(m,
82 "# | | | |\n");
83}
84
85static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
86{
87 struct trace_entry *entry = iter->ent;
88 struct trace_seq *seq = &iter->seq;
89 struct hw_branch_entry *it;
90
91 trace_assign_type(it, entry);
92
93 if (entry->type == TRACE_HW_BRANCHES) {
94 if (trace_seq_printf(seq, "%4d ", entry->cpu) &&
95 trace_seq_printf(seq, "0x%016llx -> 0x%016llx ",
96 it->from, it->to) &&
97 (!it->from ||
98 seq_print_ip_sym(seq, it->from, /* sym_flags = */ 0)) &&
99 trace_seq_printf(seq, "\n"))
100 return TRACE_TYPE_HANDLED;
101		return TRACE_TYPE_PARTIAL_LINE;
102 }
103 return TRACE_TYPE_UNHANDLED;
104}
105
106void trace_hw_branch(struct trace_array *tr, u64 from, u64 to)
107{
108 struct ring_buffer_event *event;
109 struct hw_branch_entry *entry;
110 unsigned long irq;
111
112 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq);
113 if (!event)
114 return;
115 entry = ring_buffer_event_data(event);
116 tracing_generic_entry_update(&entry->ent, 0, from);
117 entry->ent.type = TRACE_HW_BRANCHES;
118 entry->ent.cpu = smp_processor_id();
119 entry->from = from;
120 entry->to = to;
121 ring_buffer_unlock_commit(tr->buffer, event, irq);
122}
123
124static void trace_bts_at(struct trace_array *tr,
125 const struct bts_trace *trace, void *at)
126{
127 struct bts_struct bts;
128 int err = 0;
129
130 WARN_ON_ONCE(!trace->read);
131 if (!trace->read)
132 return;
133
134 err = trace->read(this_tracer, at, &bts);
135 if (err < 0)
136 return;
137
138 switch (bts.qualifier) {
139 case BTS_BRANCH:
140 trace_hw_branch(tr, bts.variant.lbr.from, bts.variant.lbr.to);
141 break;
142 }
143}
144
145static void trace_bts_cpu(void *arg)
146{
147 struct trace_array *tr = (struct trace_array *) arg;
148 const struct bts_trace *trace;
149 unsigned char *at;
150
151 if (!this_tracer)
152 return;
153
154 ds_suspend_bts(this_tracer);
155 trace = ds_read_bts(this_tracer);
156 if (!trace)
157 goto out;
158
159 for (at = trace->ds.top; (void *)at < trace->ds.end;
160 at += trace->ds.size)
161 trace_bts_at(tr, trace, at);
162
163 for (at = trace->ds.begin; (void *)at < trace->ds.top;
164 at += trace->ds.size)
165 trace_bts_at(tr, trace, at);
166
167out:
168 ds_resume_bts(this_tracer);
169}
170
171static void trace_bts_prepare(struct trace_iterator *iter)
172{
173 int cpu;
174
175 for_each_cpu_mask(cpu, cpu_possible_map)
176 smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1);
177}
178
179struct tracer bts_tracer __read_mostly =
180{
181 .name = "hw-branch-tracer",
182 .init = bts_trace_init,
183 .reset = bts_trace_stop,
184 .print_header = bts_trace_print_header,
185 .print_line = bts_trace_print_line,
186 .start = bts_trace_start,
187 .stop = bts_trace_stop,
188 .open = trace_bts_prepare
189};
190
191__init static int init_bts_trace(void)
192{
193 return register_tracer(&bts_tracer);
194}
195device_initcall(init_bts_trace);
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 9c74071c10e0..7c2e326bbc8b 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -353,15 +353,28 @@ void trace_preempt_off(unsigned long a0, unsigned long a1)
353} 353}
354#endif /* CONFIG_PREEMPT_TRACER */ 354#endif /* CONFIG_PREEMPT_TRACER */
355 355
356/*
357 * save_tracer_enabled is used to save the state of the tracer_enabled
358 * variable when we disable it when we open a trace output file.
359 */
360static int save_tracer_enabled;
361
356static void start_irqsoff_tracer(struct trace_array *tr) 362static void start_irqsoff_tracer(struct trace_array *tr)
357{ 363{
358 register_ftrace_function(&trace_ops); 364 register_ftrace_function(&trace_ops);
359 tracer_enabled = 1; 365 if (tracing_is_enabled()) {
366 tracer_enabled = 1;
367 save_tracer_enabled = 1;
368 } else {
369 tracer_enabled = 0;
370 save_tracer_enabled = 0;
371 }
360} 372}
361 373
362static void stop_irqsoff_tracer(struct trace_array *tr) 374static void stop_irqsoff_tracer(struct trace_array *tr)
363{ 375{
364 tracer_enabled = 0; 376 tracer_enabled = 0;
377 save_tracer_enabled = 0;
365 unregister_ftrace_function(&trace_ops); 378 unregister_ftrace_function(&trace_ops);
366} 379}
367 380
@@ -370,53 +383,55 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
370 irqsoff_trace = tr; 383 irqsoff_trace = tr;
371 /* make sure that the tracer is visible */ 384 /* make sure that the tracer is visible */
372 smp_wmb(); 385 smp_wmb();
373 386 start_irqsoff_tracer(tr);
374 if (tr->ctrl)
375 start_irqsoff_tracer(tr);
376} 387}
377 388
378static void irqsoff_tracer_reset(struct trace_array *tr) 389static void irqsoff_tracer_reset(struct trace_array *tr)
379{ 390{
380 if (tr->ctrl) 391 stop_irqsoff_tracer(tr);
381 stop_irqsoff_tracer(tr);
382} 392}
383 393
384static void irqsoff_tracer_ctrl_update(struct trace_array *tr) 394static void irqsoff_tracer_start(struct trace_array *tr)
385{ 395{
386 if (tr->ctrl) 396 tracer_enabled = 1;
387 start_irqsoff_tracer(tr); 397 save_tracer_enabled = 1;
388 else 398}
389 stop_irqsoff_tracer(tr); 399
400static void irqsoff_tracer_stop(struct trace_array *tr)
401{
402 tracer_enabled = 0;
403 save_tracer_enabled = 0;
390} 404}
391 405
392static void irqsoff_tracer_open(struct trace_iterator *iter) 406static void irqsoff_tracer_open(struct trace_iterator *iter)
393{ 407{
394 /* stop the trace while dumping */ 408 /* stop the trace while dumping */
395 if (iter->tr->ctrl) 409 tracer_enabled = 0;
396 stop_irqsoff_tracer(iter->tr);
397} 410}
398 411
399static void irqsoff_tracer_close(struct trace_iterator *iter) 412static void irqsoff_tracer_close(struct trace_iterator *iter)
400{ 413{
401 if (iter->tr->ctrl) 414 /* restart tracing */
402 start_irqsoff_tracer(iter->tr); 415 tracer_enabled = save_tracer_enabled;
403} 416}
404 417
405#ifdef CONFIG_IRQSOFF_TRACER 418#ifdef CONFIG_IRQSOFF_TRACER
406static void irqsoff_tracer_init(struct trace_array *tr) 419static int irqsoff_tracer_init(struct trace_array *tr)
407{ 420{
408 trace_type = TRACER_IRQS_OFF; 421 trace_type = TRACER_IRQS_OFF;
409 422
410 __irqsoff_tracer_init(tr); 423 __irqsoff_tracer_init(tr);
424 return 0;
411} 425}
412static struct tracer irqsoff_tracer __read_mostly = 426static struct tracer irqsoff_tracer __read_mostly =
413{ 427{
414 .name = "irqsoff", 428 .name = "irqsoff",
415 .init = irqsoff_tracer_init, 429 .init = irqsoff_tracer_init,
416 .reset = irqsoff_tracer_reset, 430 .reset = irqsoff_tracer_reset,
431 .start = irqsoff_tracer_start,
432 .stop = irqsoff_tracer_stop,
417 .open = irqsoff_tracer_open, 433 .open = irqsoff_tracer_open,
418 .close = irqsoff_tracer_close, 434 .close = irqsoff_tracer_close,
419 .ctrl_update = irqsoff_tracer_ctrl_update,
420 .print_max = 1, 435 .print_max = 1,
421#ifdef CONFIG_FTRACE_SELFTEST 436#ifdef CONFIG_FTRACE_SELFTEST
422 .selftest = trace_selftest_startup_irqsoff, 437 .selftest = trace_selftest_startup_irqsoff,
@@ -428,11 +443,12 @@ static struct tracer irqsoff_tracer __read_mostly =
428#endif 443#endif
429 444
430#ifdef CONFIG_PREEMPT_TRACER 445#ifdef CONFIG_PREEMPT_TRACER
431static void preemptoff_tracer_init(struct trace_array *tr) 446static int preemptoff_tracer_init(struct trace_array *tr)
432{ 447{
433 trace_type = TRACER_PREEMPT_OFF; 448 trace_type = TRACER_PREEMPT_OFF;
434 449
435 __irqsoff_tracer_init(tr); 450 __irqsoff_tracer_init(tr);
451 return 0;
436} 452}
437 453
438static struct tracer preemptoff_tracer __read_mostly = 454static struct tracer preemptoff_tracer __read_mostly =
@@ -440,9 +456,10 @@ static struct tracer preemptoff_tracer __read_mostly =
440 .name = "preemptoff", 456 .name = "preemptoff",
441 .init = preemptoff_tracer_init, 457 .init = preemptoff_tracer_init,
442 .reset = irqsoff_tracer_reset, 458 .reset = irqsoff_tracer_reset,
459 .start = irqsoff_tracer_start,
460 .stop = irqsoff_tracer_stop,
443 .open = irqsoff_tracer_open, 461 .open = irqsoff_tracer_open,
444 .close = irqsoff_tracer_close, 462 .close = irqsoff_tracer_close,
445 .ctrl_update = irqsoff_tracer_ctrl_update,
446 .print_max = 1, 463 .print_max = 1,
447#ifdef CONFIG_FTRACE_SELFTEST 464#ifdef CONFIG_FTRACE_SELFTEST
448 .selftest = trace_selftest_startup_preemptoff, 465 .selftest = trace_selftest_startup_preemptoff,
@@ -456,11 +473,12 @@ static struct tracer preemptoff_tracer __read_mostly =
456#if defined(CONFIG_IRQSOFF_TRACER) && \ 473#if defined(CONFIG_IRQSOFF_TRACER) && \
457 defined(CONFIG_PREEMPT_TRACER) 474 defined(CONFIG_PREEMPT_TRACER)
458 475
459static void preemptirqsoff_tracer_init(struct trace_array *tr) 476static int preemptirqsoff_tracer_init(struct trace_array *tr)
460{ 477{
461 trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF; 478 trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;
462 479
463 __irqsoff_tracer_init(tr); 480 __irqsoff_tracer_init(tr);
481 return 0;
464} 482}
465 483
466static struct tracer preemptirqsoff_tracer __read_mostly = 484static struct tracer preemptirqsoff_tracer __read_mostly =
@@ -468,9 +486,10 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
468 .name = "preemptirqsoff", 486 .name = "preemptirqsoff",
469 .init = preemptirqsoff_tracer_init, 487 .init = preemptirqsoff_tracer_init,
470 .reset = irqsoff_tracer_reset, 488 .reset = irqsoff_tracer_reset,
489 .start = irqsoff_tracer_start,
490 .stop = irqsoff_tracer_stop,
471 .open = irqsoff_tracer_open, 491 .open = irqsoff_tracer_open,
472 .close = irqsoff_tracer_close, 492 .close = irqsoff_tracer_close,
473 .ctrl_update = irqsoff_tracer_ctrl_update,
474 .print_max = 1, 493 .print_max = 1,
475#ifdef CONFIG_FTRACE_SELFTEST 494#ifdef CONFIG_FTRACE_SELFTEST
476 .selftest = trace_selftest_startup_preemptirqsoff, 495 .selftest = trace_selftest_startup_preemptirqsoff,
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index f28484618ff0..fffcb069f1dc 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -18,46 +18,39 @@ struct header_iter {
18 18
19static struct trace_array *mmio_trace_array; 19static struct trace_array *mmio_trace_array;
20static bool overrun_detected; 20static bool overrun_detected;
21static unsigned long prev_overruns;
21 22
22static void mmio_reset_data(struct trace_array *tr) 23static void mmio_reset_data(struct trace_array *tr)
23{ 24{
24 int cpu;
25
26 overrun_detected = false; 25 overrun_detected = false;
27 tr->time_start = ftrace_now(tr->cpu); 26 prev_overruns = 0;
28 27
29 for_each_online_cpu(cpu) 28 tracing_reset_online_cpus(tr);
30 tracing_reset(tr, cpu);
31} 29}
32 30
33static void mmio_trace_init(struct trace_array *tr) 31static int mmio_trace_init(struct trace_array *tr)
34{ 32{
35 pr_debug("in %s\n", __func__); 33 pr_debug("in %s\n", __func__);
36 mmio_trace_array = tr; 34 mmio_trace_array = tr;
37 if (tr->ctrl) { 35
38 mmio_reset_data(tr); 36 mmio_reset_data(tr);
39 enable_mmiotrace(); 37 enable_mmiotrace();
40 } 38 return 0;
41} 39}
42 40
43static void mmio_trace_reset(struct trace_array *tr) 41static void mmio_trace_reset(struct trace_array *tr)
44{ 42{
45 pr_debug("in %s\n", __func__); 43 pr_debug("in %s\n", __func__);
46 if (tr->ctrl) 44
47 disable_mmiotrace(); 45 disable_mmiotrace();
48 mmio_reset_data(tr); 46 mmio_reset_data(tr);
49 mmio_trace_array = NULL; 47 mmio_trace_array = NULL;
50} 48}
51 49
52static void mmio_trace_ctrl_update(struct trace_array *tr) 50static void mmio_trace_start(struct trace_array *tr)
53{ 51{
54 pr_debug("in %s\n", __func__); 52 pr_debug("in %s\n", __func__);
55 if (tr->ctrl) { 53 mmio_reset_data(tr);
56 mmio_reset_data(tr);
57 enable_mmiotrace();
58 } else {
59 disable_mmiotrace();
60 }
61} 54}
62 55
63static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev) 56static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev)
@@ -128,16 +121,12 @@ static void mmio_close(struct trace_iterator *iter)
128 121
129static unsigned long count_overruns(struct trace_iterator *iter) 122static unsigned long count_overruns(struct trace_iterator *iter)
130{ 123{
131 int cpu;
132 unsigned long cnt = 0; 124 unsigned long cnt = 0;
133/* FIXME: */ 125 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
134#if 0 126
135 for_each_online_cpu(cpu) { 127 if (over > prev_overruns)
136 cnt += iter->overrun[cpu]; 128 cnt = over - prev_overruns;
137 iter->overrun[cpu] = 0; 129 prev_overruns = over;
138 }
139#endif
140 (void)cpu;
141 return cnt; 130 return cnt;
142} 131}
143 132
@@ -298,10 +287,10 @@ static struct tracer mmio_tracer __read_mostly =
298 .name = "mmiotrace", 287 .name = "mmiotrace",
299 .init = mmio_trace_init, 288 .init = mmio_trace_init,
300 .reset = mmio_trace_reset, 289 .reset = mmio_trace_reset,
290 .start = mmio_trace_start,
301 .pipe_open = mmio_pipe_open, 291 .pipe_open = mmio_pipe_open,
302 .close = mmio_close, 292 .close = mmio_close,
303 .read = mmio_read, 293 .read = mmio_read,
304 .ctrl_update = mmio_trace_ctrl_update,
305 .print_line = mmio_print_line, 294 .print_line = mmio_print_line,
306}; 295};
307 296
@@ -373,5 +362,5 @@ void mmio_trace_mapping(struct mmiotrace_map *map)
373 362
374int mmio_trace_printk(const char *fmt, va_list args) 363int mmio_trace_printk(const char *fmt, va_list args)
375{ 364{
376 return trace_vprintk(0, fmt, args); 365 return trace_vprintk(0, -1, fmt, args);
377} 366}
diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c
index 4592b4862515..b9767acd30ac 100644
--- a/kernel/trace/trace_nop.c
+++ b/kernel/trace/trace_nop.c
@@ -12,6 +12,27 @@
12 12
13#include "trace.h" 13#include "trace.h"
14 14
15/* Our two options */
16enum {
17 TRACE_NOP_OPT_ACCEPT = 0x1,
18 TRACE_NOP_OPT_REFUSE = 0x2
19};
20
21/* Options for the tracer (see trace_options file) */
22static struct tracer_opt nop_opts[] = {
23 /* Option that will be accepted by set_flag callback */
24 { TRACER_OPT(test_nop_accept, TRACE_NOP_OPT_ACCEPT) },
25 /* Option that will be refused by set_flag callback */
26 { TRACER_OPT(test_nop_refuse, TRACE_NOP_OPT_REFUSE) },
27 { } /* Always set a last empty entry */
28};
29
30static struct tracer_flags nop_flags = {
31 /* You can check your flags value here when you want. */
32 .val = 0, /* By default: all flags disabled */
33 .opts = nop_opts
34};
35
15static struct trace_array *ctx_trace; 36static struct trace_array *ctx_trace;
16 37
17static void start_nop_trace(struct trace_array *tr) 38static void start_nop_trace(struct trace_array *tr)
@@ -24,7 +45,7 @@ static void stop_nop_trace(struct trace_array *tr)
24 /* Nothing to do! */ 45 /* Nothing to do! */
25} 46}
26 47
27static void nop_trace_init(struct trace_array *tr) 48static int nop_trace_init(struct trace_array *tr)
28{ 49{
29 int cpu; 50 int cpu;
30 ctx_trace = tr; 51 ctx_trace = tr;
@@ -32,33 +53,53 @@ static void nop_trace_init(struct trace_array *tr)
32 for_each_online_cpu(cpu) 53 for_each_online_cpu(cpu)
33 tracing_reset(tr, cpu); 54 tracing_reset(tr, cpu);
34 55
35 if (tr->ctrl) 56 start_nop_trace(tr);
36 start_nop_trace(tr); 57 return 0;
37} 58}
38 59
39static void nop_trace_reset(struct trace_array *tr) 60static void nop_trace_reset(struct trace_array *tr)
40{ 61{
41 if (tr->ctrl) 62 stop_nop_trace(tr);
42 stop_nop_trace(tr);
43} 63}
44 64
45static void nop_trace_ctrl_update(struct trace_array *tr) 65/* It only serves as a signal handler and a callback to
66 * accept or refuse the setting of a flag.
67 * If you don't implement it, then the flag setting will be
68 * automatically accepted.
69 */
70static int nop_set_flag(u32 old_flags, u32 bit, int set)
46{ 71{
47 /* When starting a new trace, reset the buffers */ 72 /*
48 if (tr->ctrl) 73 * Note that you don't need to update nop_flags.val yourself.
49 start_nop_trace(tr); 74 * The tracing Api will do it automatically if you return 0
50 else 75 */
51 stop_nop_trace(tr); 76 if (bit == TRACE_NOP_OPT_ACCEPT) {
77 printk(KERN_DEBUG "nop_test_accept flag set to %d: we accept."
78 " Now cat trace_options to see the result\n",
79 set);
80 return 0;
81 }
82
83 if (bit == TRACE_NOP_OPT_REFUSE) {
84 printk(KERN_DEBUG "nop_test_refuse flag set to %d: we refuse."
85			" Now cat trace_options to see the result\n",
86 set);
87 return -EINVAL;
88 }
89
90 return 0;
52} 91}
53 92
93
54struct tracer nop_trace __read_mostly = 94struct tracer nop_trace __read_mostly =
55{ 95{
56 .name = "nop", 96 .name = "nop",
57 .init = nop_trace_init, 97 .init = nop_trace_init,
58 .reset = nop_trace_reset, 98 .reset = nop_trace_reset,
59 .ctrl_update = nop_trace_ctrl_update,
60#ifdef CONFIG_FTRACE_SELFTEST 99#ifdef CONFIG_FTRACE_SELFTEST
61 .selftest = trace_selftest_startup_nop, 100 .selftest = trace_selftest_startup_nop,
62#endif 101#endif
102 .flags = &nop_flags,
103 .set_flag = nop_set_flag
63}; 104};
64 105
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
new file mode 100644
index 000000000000..a7172a352f62
--- /dev/null
+++ b/kernel/trace/trace_power.c
@@ -0,0 +1,179 @@
1/*
2 * ring buffer based C-state tracer
3 *
4 * Arjan van de Ven <arjan@linux.intel.com>
5 * Copyright (C) 2008 Intel Corporation
6 *
7 * Much is borrowed from trace_boot.c which is
8 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
9 *
10 */
11
12#include <linux/init.h>
13#include <linux/debugfs.h>
14#include <linux/ftrace.h>
15#include <linux/kallsyms.h>
16#include <linux/module.h>
17
18#include "trace.h"
19
20static struct trace_array *power_trace;
21static int __read_mostly trace_power_enabled;
22
23
24static void start_power_trace(struct trace_array *tr)
25{
26 trace_power_enabled = 1;
27}
28
29static void stop_power_trace(struct trace_array *tr)
30{
31 trace_power_enabled = 0;
32}
33
34
35static int power_trace_init(struct trace_array *tr)
36{
37 int cpu;
38 power_trace = tr;
39
40 trace_power_enabled = 1;
41
42 for_each_cpu_mask(cpu, cpu_possible_map)
43 tracing_reset(tr, cpu);
44 return 0;
45}
46
47static enum print_line_t power_print_line(struct trace_iterator *iter)
48{
49 int ret = 0;
50 struct trace_entry *entry = iter->ent;
51 struct trace_power *field ;
52 struct power_trace *it;
53 struct trace_seq *s = &iter->seq;
54 struct timespec stamp;
55 struct timespec duration;
56
57 trace_assign_type(field, entry);
58 it = &field->state_data;
59 stamp = ktime_to_timespec(it->stamp);
60 duration = ktime_to_timespec(ktime_sub(it->end, it->stamp));
61
62 if (entry->type == TRACE_POWER) {
63 if (it->type == POWER_CSTATE)
64 ret = trace_seq_printf(s, "[%5ld.%09ld] CSTATE: Going to C%i on cpu %i for %ld.%09ld\n",
65 stamp.tv_sec,
66 stamp.tv_nsec,
67 it->state, iter->cpu,
68 duration.tv_sec,
69 duration.tv_nsec);
70 if (it->type == POWER_PSTATE)
71 ret = trace_seq_printf(s, "[%5ld.%09ld] PSTATE: Going to P%i on cpu %i\n",
72 stamp.tv_sec,
73 stamp.tv_nsec,
74 it->state, iter->cpu);
75 if (!ret)
76 return TRACE_TYPE_PARTIAL_LINE;
77 return TRACE_TYPE_HANDLED;
78 }
79 return TRACE_TYPE_UNHANDLED;
80}
81
82static struct tracer power_tracer __read_mostly =
83{
84 .name = "power",
85 .init = power_trace_init,
86 .start = start_power_trace,
87 .stop = stop_power_trace,
88 .reset = stop_power_trace,
89 .print_line = power_print_line,
90};
91
92static int init_power_trace(void)
93{
94 return register_tracer(&power_tracer);
95}
96device_initcall(init_power_trace);
97
98void trace_power_start(struct power_trace *it, unsigned int type,
99 unsigned int level)
100{
101 if (!trace_power_enabled)
102 return;
103
104 memset(it, 0, sizeof(struct power_trace));
105 it->state = level;
106 it->type = type;
107 it->stamp = ktime_get();
108}
109EXPORT_SYMBOL_GPL(trace_power_start);
110
111
112void trace_power_end(struct power_trace *it)
113{
114 struct ring_buffer_event *event;
115 struct trace_power *entry;
116 struct trace_array_cpu *data;
117 unsigned long irq_flags;
118 struct trace_array *tr = power_trace;
119
120 if (!trace_power_enabled)
121 return;
122
123 preempt_disable();
124 it->end = ktime_get();
125 data = tr->data[smp_processor_id()];
126
127 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
128 &irq_flags);
129 if (!event)
130 goto out;
131 entry = ring_buffer_event_data(event);
132 tracing_generic_entry_update(&entry->ent, 0, 0);
133 entry->ent.type = TRACE_POWER;
134 entry->state_data = *it;
135 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
136
137 trace_wake_up();
138
139 out:
140 preempt_enable();
141}
142EXPORT_SYMBOL_GPL(trace_power_end);
143
144void trace_power_mark(struct power_trace *it, unsigned int type,
145 unsigned int level)
146{
147 struct ring_buffer_event *event;
148 struct trace_power *entry;
149 struct trace_array_cpu *data;
150 unsigned long irq_flags;
151 struct trace_array *tr = power_trace;
152
153 if (!trace_power_enabled)
154 return;
155
156 memset(it, 0, sizeof(struct power_trace));
157 it->state = level;
158 it->type = type;
159 it->stamp = ktime_get();
160 preempt_disable();
161 it->end = it->stamp;
162 data = tr->data[smp_processor_id()];
163
164 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
165 &irq_flags);
166 if (!event)
167 goto out;
168 entry = ring_buffer_event_data(event);
169 tracing_generic_entry_update(&entry->ent, 0, 0);
170 entry->ent.type = TRACE_POWER;
171 entry->state_data = *it;
172 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
173
174 trace_wake_up();
175
176 out:
177 preempt_enable();
178}
179EXPORT_SYMBOL_GPL(trace_power_mark);
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index b8f56beb1a62..df175cb4564f 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -16,7 +16,8 @@
16 16
17static struct trace_array *ctx_trace; 17static struct trace_array *ctx_trace;
18static int __read_mostly tracer_enabled; 18static int __read_mostly tracer_enabled;
19static atomic_t sched_ref; 19static int sched_ref;
20static DEFINE_MUTEX(sched_register_mutex);
20 21
21static void 22static void
22probe_sched_switch(struct rq *__rq, struct task_struct *prev, 23probe_sched_switch(struct rq *__rq, struct task_struct *prev,
@@ -27,7 +28,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
27 int cpu; 28 int cpu;
28 int pc; 29 int pc;
29 30
30 if (!atomic_read(&sched_ref)) 31 if (!sched_ref)
31 return; 32 return;
32 33
33 tracing_record_cmdline(prev); 34 tracing_record_cmdline(prev);
@@ -48,7 +49,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
48} 49}
49 50
50static void 51static void
51probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee) 52probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
52{ 53{
53 struct trace_array_cpu *data; 54 struct trace_array_cpu *data;
54 unsigned long flags; 55 unsigned long flags;
@@ -71,16 +72,6 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
71 local_irq_restore(flags); 72 local_irq_restore(flags);
72} 73}
73 74
74static void sched_switch_reset(struct trace_array *tr)
75{
76 int cpu;
77
78 tr->time_start = ftrace_now(tr->cpu);
79
80 for_each_online_cpu(cpu)
81 tracing_reset(tr, cpu);
82}
83
84static int tracing_sched_register(void) 75static int tracing_sched_register(void)
85{ 76{
86 int ret; 77 int ret;
@@ -123,20 +114,18 @@ static void tracing_sched_unregister(void)
123 114
124static void tracing_start_sched_switch(void) 115static void tracing_start_sched_switch(void)
125{ 116{
126 long ref; 117 mutex_lock(&sched_register_mutex);
127 118 if (!(sched_ref++))
128 ref = atomic_inc_return(&sched_ref);
129 if (ref == 1)
130 tracing_sched_register(); 119 tracing_sched_register();
120 mutex_unlock(&sched_register_mutex);
131} 121}
132 122
133static void tracing_stop_sched_switch(void) 123static void tracing_stop_sched_switch(void)
134{ 124{
135 long ref; 125 mutex_lock(&sched_register_mutex);
136 126 if (!(--sched_ref))
137 ref = atomic_dec_and_test(&sched_ref);
138 if (ref)
139 tracing_sched_unregister(); 127 tracing_sched_unregister();
128 mutex_unlock(&sched_register_mutex);
140} 129}
141 130
142void tracing_start_cmdline_record(void) 131void tracing_start_cmdline_record(void)
@@ -149,40 +138,86 @@ void tracing_stop_cmdline_record(void)
149 tracing_stop_sched_switch(); 138 tracing_stop_sched_switch();
150} 139}
151 140
141/**
142 * tracing_start_sched_switch_record - start tracing context switches
143 *
144 * Turns on context switch tracing for a tracer.
145 */
146void tracing_start_sched_switch_record(void)
147{
148 if (unlikely(!ctx_trace)) {
149 WARN_ON(1);
150 return;
151 }
152
153 tracing_start_sched_switch();
154
155 mutex_lock(&sched_register_mutex);
156 tracer_enabled++;
157 mutex_unlock(&sched_register_mutex);
158}
159
160/**
161 * tracing_stop_sched_switch_record - stop tracing context switches
162 *
163 * Turns off context switch tracing for a tracer.
164 */
165void tracing_stop_sched_switch_record(void)
166{
167 mutex_lock(&sched_register_mutex);
168 tracer_enabled--;
169 WARN_ON(tracer_enabled < 0);
170 mutex_unlock(&sched_register_mutex);
171
172 tracing_stop_sched_switch();
173}
174
175/**
176 * tracing_sched_switch_assign_trace - assign a trace array for ctx switch
177 * @tr: trace array pointer to assign
178 *
179 * Some tracers might want to record the context switches in their
180 * trace. This function lets those tracers assign the trace array
181 * to use.
182 */
183void tracing_sched_switch_assign_trace(struct trace_array *tr)
184{
185 ctx_trace = tr;
186}
187
152static void start_sched_trace(struct trace_array *tr) 188static void start_sched_trace(struct trace_array *tr)
153{ 189{
154 sched_switch_reset(tr); 190 tracing_reset_online_cpus(tr);
155 tracing_start_cmdline_record(); 191 tracing_start_sched_switch_record();
156 tracer_enabled = 1;
157} 192}
158 193
159static void stop_sched_trace(struct trace_array *tr) 194static void stop_sched_trace(struct trace_array *tr)
160{ 195{
161 tracer_enabled = 0; 196 tracing_stop_sched_switch_record();
162 tracing_stop_cmdline_record();
163} 197}
164 198
165static void sched_switch_trace_init(struct trace_array *tr) 199static int sched_switch_trace_init(struct trace_array *tr)
166{ 200{
167 ctx_trace = tr; 201 ctx_trace = tr;
168 202 start_sched_trace(tr);
169 if (tr->ctrl) 203 return 0;
170 start_sched_trace(tr);
171} 204}
172 205
173static void sched_switch_trace_reset(struct trace_array *tr) 206static void sched_switch_trace_reset(struct trace_array *tr)
174{ 207{
175 if (tr->ctrl) 208 if (sched_ref)
176 stop_sched_trace(tr); 209 stop_sched_trace(tr);
177} 210}
178 211
179static void sched_switch_trace_ctrl_update(struct trace_array *tr) 212static void sched_switch_trace_start(struct trace_array *tr)
180{ 213{
181 /* When starting a new trace, reset the buffers */ 214 tracing_reset_online_cpus(tr);
182 if (tr->ctrl) 215 tracing_start_sched_switch();
183 start_sched_trace(tr); 216}
184 else 217
185 stop_sched_trace(tr); 218static void sched_switch_trace_stop(struct trace_array *tr)
219{
220 tracing_stop_sched_switch();
186} 221}
187 222
188static struct tracer sched_switch_trace __read_mostly = 223static struct tracer sched_switch_trace __read_mostly =
@@ -190,7 +225,8 @@ static struct tracer sched_switch_trace __read_mostly =
190 .name = "sched_switch", 225 .name = "sched_switch",
191 .init = sched_switch_trace_init, 226 .init = sched_switch_trace_init,
192 .reset = sched_switch_trace_reset, 227 .reset = sched_switch_trace_reset,
193 .ctrl_update = sched_switch_trace_ctrl_update, 228 .start = sched_switch_trace_start,
229 .stop = sched_switch_trace_stop,
194#ifdef CONFIG_FTRACE_SELFTEST 230#ifdef CONFIG_FTRACE_SELFTEST
195 .selftest = trace_selftest_startup_sched_switch, 231 .selftest = trace_selftest_startup_sched_switch,
196#endif 232#endif
@@ -198,14 +234,7 @@ static struct tracer sched_switch_trace __read_mostly =
198 234
199__init static int init_sched_switch_trace(void) 235__init static int init_sched_switch_trace(void)
200{ 236{
201 int ret = 0;
202
203 if (atomic_read(&sched_ref))
204 ret = tracing_sched_register();
205 if (ret) {
206 pr_info("error registering scheduler trace\n");
207 return ret;
208 }
209 return register_tracer(&sched_switch_trace); 237 return register_tracer(&sched_switch_trace);
210} 238}
211device_initcall(init_sched_switch_trace); 239device_initcall(init_sched_switch_trace);
240
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 3ae93f16b565..43586b689e31 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -50,8 +50,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
50 return; 50 return;
51 51
52 pc = preempt_count(); 52 pc = preempt_count();
53 resched = need_resched(); 53 resched = ftrace_preempt_disable();
54 preempt_disable_notrace();
55 54
56 cpu = raw_smp_processor_id(); 55 cpu = raw_smp_processor_id();
57 data = tr->data[cpu]; 56 data = tr->data[cpu];
@@ -81,15 +80,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
81 out: 80 out:
82 atomic_dec(&data->disabled); 81 atomic_dec(&data->disabled);
83 82
84 /* 83 ftrace_preempt_enable(resched);
85 * To prevent recursion from the scheduler, if the
86 * resched flag was set before we entered, then
87 * don't reschedule.
88 */
89 if (resched)
90 preempt_enable_no_resched_notrace();
91 else
92 preempt_enable_notrace();
93} 84}
94 85
95static struct ftrace_ops trace_ops __read_mostly = 86static struct ftrace_ops trace_ops __read_mostly =
@@ -220,7 +211,7 @@ static void wakeup_reset(struct trace_array *tr)
220} 211}
221 212
222static void 213static void
223probe_wakeup(struct rq *rq, struct task_struct *p) 214probe_wakeup(struct rq *rq, struct task_struct *p, int success)
224{ 215{
225 int cpu = smp_processor_id(); 216 int cpu = smp_processor_id();
226 unsigned long flags; 217 unsigned long flags;
@@ -271,6 +262,12 @@ out:
271 atomic_dec(&wakeup_trace->data[cpu]->disabled); 262 atomic_dec(&wakeup_trace->data[cpu]->disabled);
272} 263}
273 264
265/*
266 * save_tracer_enabled is used to save the state of the tracer_enabled
267 * variable when we disable it when we open a trace output file.
268 */
269static int save_tracer_enabled;
270
274static void start_wakeup_tracer(struct trace_array *tr) 271static void start_wakeup_tracer(struct trace_array *tr)
275{ 272{
276 int ret; 273 int ret;
@@ -309,7 +306,13 @@ static void start_wakeup_tracer(struct trace_array *tr)
309 306
310 register_ftrace_function(&trace_ops); 307 register_ftrace_function(&trace_ops);
311 308
312 tracer_enabled = 1; 309 if (tracing_is_enabled()) {
310 tracer_enabled = 1;
311 save_tracer_enabled = 1;
312 } else {
313 tracer_enabled = 0;
314 save_tracer_enabled = 0;
315 }
313 316
314 return; 317 return;
315fail_deprobe_wake_new: 318fail_deprobe_wake_new:
@@ -321,49 +324,53 @@ fail_deprobe:
321static void stop_wakeup_tracer(struct trace_array *tr) 324static void stop_wakeup_tracer(struct trace_array *tr)
322{ 325{
323 tracer_enabled = 0; 326 tracer_enabled = 0;
327 save_tracer_enabled = 0;
324 unregister_ftrace_function(&trace_ops); 328 unregister_ftrace_function(&trace_ops);
325 unregister_trace_sched_switch(probe_wakeup_sched_switch); 329 unregister_trace_sched_switch(probe_wakeup_sched_switch);
326 unregister_trace_sched_wakeup_new(probe_wakeup); 330 unregister_trace_sched_wakeup_new(probe_wakeup);
327 unregister_trace_sched_wakeup(probe_wakeup); 331 unregister_trace_sched_wakeup(probe_wakeup);
328} 332}
329 333
330static void wakeup_tracer_init(struct trace_array *tr) 334static int wakeup_tracer_init(struct trace_array *tr)
331{ 335{
332 wakeup_trace = tr; 336 wakeup_trace = tr;
333 337 start_wakeup_tracer(tr);
334 if (tr->ctrl) 338 return 0;
335 start_wakeup_tracer(tr);
336} 339}
337 340
338static void wakeup_tracer_reset(struct trace_array *tr) 341static void wakeup_tracer_reset(struct trace_array *tr)
339{ 342{
340 if (tr->ctrl) { 343 stop_wakeup_tracer(tr);
341 stop_wakeup_tracer(tr); 344 /* make sure we put back any tasks we are tracing */
342 /* make sure we put back any tasks we are tracing */ 345 wakeup_reset(tr);
343 wakeup_reset(tr); 346}
344 } 347
348static void wakeup_tracer_start(struct trace_array *tr)
349{
350 wakeup_reset(tr);
351 tracer_enabled = 1;
352 save_tracer_enabled = 1;
345} 353}
346 354
347static void wakeup_tracer_ctrl_update(struct trace_array *tr) 355static void wakeup_tracer_stop(struct trace_array *tr)
348{ 356{
349 if (tr->ctrl) 357 tracer_enabled = 0;
350 start_wakeup_tracer(tr); 358 save_tracer_enabled = 0;
351 else
352 stop_wakeup_tracer(tr);
353} 359}
354 360
355static void wakeup_tracer_open(struct trace_iterator *iter) 361static void wakeup_tracer_open(struct trace_iterator *iter)
356{ 362{
357 /* stop the trace while dumping */ 363 /* stop the trace while dumping */
358 if (iter->tr->ctrl) 364 tracer_enabled = 0;
359 stop_wakeup_tracer(iter->tr);
360} 365}
361 366
362static void wakeup_tracer_close(struct trace_iterator *iter) 367static void wakeup_tracer_close(struct trace_iterator *iter)
363{ 368{
364 /* forget about any processes we were recording */ 369 /* forget about any processes we were recording */
365 if (iter->tr->ctrl) 370 if (save_tracer_enabled) {
366 start_wakeup_tracer(iter->tr); 371 wakeup_reset(iter->tr);
372 tracer_enabled = 1;
373 }
367} 374}
368 375
369static struct tracer wakeup_tracer __read_mostly = 376static struct tracer wakeup_tracer __read_mostly =
@@ -371,9 +378,10 @@ static struct tracer wakeup_tracer __read_mostly =
371 .name = "wakeup", 378 .name = "wakeup",
372 .init = wakeup_tracer_init, 379 .init = wakeup_tracer_init,
373 .reset = wakeup_tracer_reset, 380 .reset = wakeup_tracer_reset,
381 .start = wakeup_tracer_start,
382 .stop = wakeup_tracer_stop,
374 .open = wakeup_tracer_open, 383 .open = wakeup_tracer_open,
375 .close = wakeup_tracer_close, 384 .close = wakeup_tracer_close,
376 .ctrl_update = wakeup_tracer_ctrl_update,
377 .print_max = 1, 385 .print_max = 1,
378#ifdef CONFIG_FTRACE_SELFTEST 386#ifdef CONFIG_FTRACE_SELFTEST
379 .selftest = trace_selftest_startup_wakeup, 387 .selftest = trace_selftest_startup_wakeup,
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 90bc752a7580..88c8eb70f54a 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -13,6 +13,7 @@ static inline int trace_valid_entry(struct trace_entry *entry)
13 case TRACE_STACK: 13 case TRACE_STACK:
14 case TRACE_PRINT: 14 case TRACE_PRINT:
15 case TRACE_SPECIAL: 15 case TRACE_SPECIAL:
16 case TRACE_BRANCH:
16 return 1; 17 return 1;
17 } 18 }
18 return 0; 19 return 0;
@@ -51,7 +52,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
51 int cpu, ret = 0; 52 int cpu, ret = 0;
52 53
53 /* Don't allow flipping of max traces now */ 54 /* Don't allow flipping of max traces now */
54 raw_local_irq_save(flags); 55 local_irq_save(flags);
55 __raw_spin_lock(&ftrace_max_lock); 56 __raw_spin_lock(&ftrace_max_lock);
56 57
57 cnt = ring_buffer_entries(tr->buffer); 58 cnt = ring_buffer_entries(tr->buffer);
@@ -62,7 +63,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
62 break; 63 break;
63 } 64 }
64 __raw_spin_unlock(&ftrace_max_lock); 65 __raw_spin_unlock(&ftrace_max_lock);
65 raw_local_irq_restore(flags); 66 local_irq_restore(flags);
66 67
67 if (count) 68 if (count)
68 *count = cnt; 69 *count = cnt;
@@ -70,6 +71,11 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
70 return ret; 71 return ret;
71} 72}
72 73
74static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
75{
76 printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
77 trace->name, init_ret);
78}
73#ifdef CONFIG_FUNCTION_TRACER 79#ifdef CONFIG_FUNCTION_TRACER
74 80
75#ifdef CONFIG_DYNAMIC_FTRACE 81#ifdef CONFIG_DYNAMIC_FTRACE
@@ -110,8 +116,11 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
110 ftrace_set_filter(func_name, strlen(func_name), 1); 116 ftrace_set_filter(func_name, strlen(func_name), 1);
111 117
112 /* enable tracing */ 118 /* enable tracing */
113 tr->ctrl = 1; 119 ret = trace->init(tr);
114 trace->init(tr); 120 if (ret) {
121 warn_failed_init_tracer(trace, ret);
122 goto out;
123 }
115 124
116 /* Sleep for a 1/10 of a second */ 125 /* Sleep for a 1/10 of a second */
117 msleep(100); 126 msleep(100);
@@ -134,13 +143,13 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
134 msleep(100); 143 msleep(100);
135 144
136 /* stop the tracing. */ 145 /* stop the tracing. */
137 tr->ctrl = 0; 146 tracing_stop();
138 trace->ctrl_update(tr);
139 ftrace_enabled = 0; 147 ftrace_enabled = 0;
140 148
141 /* check the trace buffer */ 149 /* check the trace buffer */
142 ret = trace_test_buffer(tr, &count); 150 ret = trace_test_buffer(tr, &count);
143 trace->reset(tr); 151 trace->reset(tr);
152 tracing_start();
144 153
145 /* we should only have one item */ 154 /* we should only have one item */
146 if (!ret && count != 1) { 155 if (!ret && count != 1) {
@@ -148,6 +157,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
148 ret = -1; 157 ret = -1;
149 goto out; 158 goto out;
150 } 159 }
160
151 out: 161 out:
152 ftrace_enabled = save_ftrace_enabled; 162 ftrace_enabled = save_ftrace_enabled;
153 tracer_enabled = save_tracer_enabled; 163 tracer_enabled = save_tracer_enabled;
@@ -180,18 +190,22 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
180 ftrace_enabled = 1; 190 ftrace_enabled = 1;
181 tracer_enabled = 1; 191 tracer_enabled = 1;
182 192
183 tr->ctrl = 1; 193 ret = trace->init(tr);
184 trace->init(tr); 194 if (ret) {
195 warn_failed_init_tracer(trace, ret);
196 goto out;
197 }
198
185 /* Sleep for a 1/10 of a second */ 199 /* Sleep for a 1/10 of a second */
186 msleep(100); 200 msleep(100);
187 /* stop the tracing. */ 201 /* stop the tracing. */
188 tr->ctrl = 0; 202 tracing_stop();
189 trace->ctrl_update(tr);
190 ftrace_enabled = 0; 203 ftrace_enabled = 0;
191 204
192 /* check the trace buffer */ 205 /* check the trace buffer */
193 ret = trace_test_buffer(tr, &count); 206 ret = trace_test_buffer(tr, &count);
194 trace->reset(tr); 207 trace->reset(tr);
208 tracing_start();
195 209
196 if (!ret && !count) { 210 if (!ret && !count) {
197 printk(KERN_CONT ".. no entries found .."); 211 printk(KERN_CONT ".. no entries found ..");
@@ -223,8 +237,12 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
223 int ret; 237 int ret;
224 238
225 /* start the tracing */ 239 /* start the tracing */
226 tr->ctrl = 1; 240 ret = trace->init(tr);
227 trace->init(tr); 241 if (ret) {
242 warn_failed_init_tracer(trace, ret);
243 return ret;
244 }
245
228 /* reset the max latency */ 246 /* reset the max latency */
229 tracing_max_latency = 0; 247 tracing_max_latency = 0;
230 /* disable interrupts for a bit */ 248 /* disable interrupts for a bit */
@@ -232,13 +250,13 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
232 udelay(100); 250 udelay(100);
233 local_irq_enable(); 251 local_irq_enable();
234 /* stop the tracing. */ 252 /* stop the tracing. */
235 tr->ctrl = 0; 253 tracing_stop();
236 trace->ctrl_update(tr);
237 /* check both trace buffers */ 254 /* check both trace buffers */
238 ret = trace_test_buffer(tr, NULL); 255 ret = trace_test_buffer(tr, NULL);
239 if (!ret) 256 if (!ret)
240 ret = trace_test_buffer(&max_tr, &count); 257 ret = trace_test_buffer(&max_tr, &count);
241 trace->reset(tr); 258 trace->reset(tr);
259 tracing_start();
242 260
243 if (!ret && !count) { 261 if (!ret && !count) {
244 printk(KERN_CONT ".. no entries found .."); 262 printk(KERN_CONT ".. no entries found ..");
@@ -259,9 +277,26 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
259 unsigned long count; 277 unsigned long count;
260 int ret; 278 int ret;
261 279
280 /*
281 * Now that the big kernel lock is no longer preemptable,
282 * and this is called with the BKL held, it will always
283 * fail. If preemption is already disabled, simply
284 * pass the test. When the BKL is removed, or becomes
285 * preemptible again, we will once again test this,
286 * so keep it in.
287 */
288 if (preempt_count()) {
289 printk(KERN_CONT "can not test ... force ");
290 return 0;
291 }
292
262 /* start the tracing */ 293 /* start the tracing */
263 tr->ctrl = 1; 294 ret = trace->init(tr);
264 trace->init(tr); 295 if (ret) {
296 warn_failed_init_tracer(trace, ret);
297 return ret;
298 }
299
265 /* reset the max latency */ 300 /* reset the max latency */
266 tracing_max_latency = 0; 301 tracing_max_latency = 0;
267 /* disable preemption for a bit */ 302 /* disable preemption for a bit */
@@ -269,13 +304,13 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
269 udelay(100); 304 udelay(100);
270 preempt_enable(); 305 preempt_enable();
271 /* stop the tracing. */ 306 /* stop the tracing. */
272 tr->ctrl = 0; 307 tracing_stop();
273 trace->ctrl_update(tr);
274 /* check both trace buffers */ 308 /* check both trace buffers */
275 ret = trace_test_buffer(tr, NULL); 309 ret = trace_test_buffer(tr, NULL);
276 if (!ret) 310 if (!ret)
277 ret = trace_test_buffer(&max_tr, &count); 311 ret = trace_test_buffer(&max_tr, &count);
278 trace->reset(tr); 312 trace->reset(tr);
313 tracing_start();
279 314
280 if (!ret && !count) { 315 if (!ret && !count) {
281 printk(KERN_CONT ".. no entries found .."); 316 printk(KERN_CONT ".. no entries found ..");
@@ -296,9 +331,25 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
296 unsigned long count; 331 unsigned long count;
297 int ret; 332 int ret;
298 333
334 /*
335 * Now that the big kernel lock is no longer preemptable,
336 * and this is called with the BKL held, it will always
337 * fail. If preemption is already disabled, simply
338 * pass the test. When the BKL is removed, or becomes
339 * preemptible again, we will once again test this,
340 * so keep it in.
341 */
342 if (preempt_count()) {
343 printk(KERN_CONT "can not test ... force ");
344 return 0;
345 }
346
299 /* start the tracing */ 347 /* start the tracing */
300 tr->ctrl = 1; 348 ret = trace->init(tr);
301 trace->init(tr); 349 if (ret) {
350 warn_failed_init_tracer(trace, ret);
351 goto out;
352 }
302 353
303 /* reset the max latency */ 354 /* reset the max latency */
304 tracing_max_latency = 0; 355 tracing_max_latency = 0;
@@ -312,27 +363,30 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
312 local_irq_enable(); 363 local_irq_enable();
313 364
314 /* stop the tracing. */ 365 /* stop the tracing. */
315 tr->ctrl = 0; 366 tracing_stop();
316 trace->ctrl_update(tr);
317 /* check both trace buffers */ 367 /* check both trace buffers */
318 ret = trace_test_buffer(tr, NULL); 368 ret = trace_test_buffer(tr, NULL);
319 if (ret) 369 if (ret) {
370 tracing_start();
320 goto out; 371 goto out;
372 }
321 373
322 ret = trace_test_buffer(&max_tr, &count); 374 ret = trace_test_buffer(&max_tr, &count);
323 if (ret) 375 if (ret) {
376 tracing_start();
324 goto out; 377 goto out;
378 }
325 379
326 if (!ret && !count) { 380 if (!ret && !count) {
327 printk(KERN_CONT ".. no entries found .."); 381 printk(KERN_CONT ".. no entries found ..");
328 ret = -1; 382 ret = -1;
383 tracing_start();
329 goto out; 384 goto out;
330 } 385 }
331 386
332 /* do the test by disabling interrupts first this time */ 387 /* do the test by disabling interrupts first this time */
333 tracing_max_latency = 0; 388 tracing_max_latency = 0;
334 tr->ctrl = 1; 389 tracing_start();
335 trace->ctrl_update(tr);
336 preempt_disable(); 390 preempt_disable();
337 local_irq_disable(); 391 local_irq_disable();
338 udelay(100); 392 udelay(100);
@@ -341,8 +395,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
341 local_irq_enable(); 395 local_irq_enable();
342 396
343 /* stop the tracing. */ 397 /* stop the tracing. */
344 tr->ctrl = 0; 398 tracing_stop();
345 trace->ctrl_update(tr);
346 /* check both trace buffers */ 399 /* check both trace buffers */
347 ret = trace_test_buffer(tr, NULL); 400 ret = trace_test_buffer(tr, NULL);
348 if (ret) 401 if (ret)
@@ -358,6 +411,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
358 411
359 out: 412 out:
360 trace->reset(tr); 413 trace->reset(tr);
414 tracing_start();
361 tracing_max_latency = save_max; 415 tracing_max_latency = save_max;
362 416
363 return ret; 417 return ret;
@@ -423,8 +477,12 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
423 wait_for_completion(&isrt); 477 wait_for_completion(&isrt);
424 478
425 /* start the tracing */ 479 /* start the tracing */
426 tr->ctrl = 1; 480 ret = trace->init(tr);
427 trace->init(tr); 481 if (ret) {
482 warn_failed_init_tracer(trace, ret);
483 return ret;
484 }
485
428 /* reset the max latency */ 486 /* reset the max latency */
429 tracing_max_latency = 0; 487 tracing_max_latency = 0;
430 488
@@ -448,8 +506,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
448 msleep(100); 506 msleep(100);
449 507
450 /* stop the tracing. */ 508 /* stop the tracing. */
451 tr->ctrl = 0; 509 tracing_stop();
452 trace->ctrl_update(tr);
453 /* check both trace buffers */ 510 /* check both trace buffers */
454 ret = trace_test_buffer(tr, NULL); 511 ret = trace_test_buffer(tr, NULL);
455 if (!ret) 512 if (!ret)
@@ -457,6 +514,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
457 514
458 515
459 trace->reset(tr); 516 trace->reset(tr);
517 tracing_start();
460 518
461 tracing_max_latency = save_max; 519 tracing_max_latency = save_max;
462 520
@@ -480,16 +538,20 @@ trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr
480 int ret; 538 int ret;
481 539
482 /* start the tracing */ 540 /* start the tracing */
483 tr->ctrl = 1; 541 ret = trace->init(tr);
484 trace->init(tr); 542 if (ret) {
543 warn_failed_init_tracer(trace, ret);
544 return ret;
545 }
546
485 /* Sleep for a 1/10 of a second */ 547 /* Sleep for a 1/10 of a second */
486 msleep(100); 548 msleep(100);
487 /* stop the tracing. */ 549 /* stop the tracing. */
488 tr->ctrl = 0; 550 tracing_stop();
489 trace->ctrl_update(tr);
490 /* check the trace buffer */ 551 /* check the trace buffer */
491 ret = trace_test_buffer(tr, &count); 552 ret = trace_test_buffer(tr, &count);
492 trace->reset(tr); 553 trace->reset(tr);
554 tracing_start();
493 555
494 if (!ret && !count) { 556 if (!ret && !count) {
495 printk(KERN_CONT ".. no entries found .."); 557 printk(KERN_CONT ".. no entries found ..");
@@ -508,17 +570,48 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
508 int ret; 570 int ret;
509 571
510 /* start the tracing */ 572 /* start the tracing */
511 tr->ctrl = 1; 573 ret = trace->init(tr);
512 trace->init(tr); 574 if (ret) {
575 warn_failed_init_tracer(trace, ret);
576 return 0;
577 }
578
513 /* Sleep for a 1/10 of a second */ 579 /* Sleep for a 1/10 of a second */
514 msleep(100); 580 msleep(100);
515 /* stop the tracing. */ 581 /* stop the tracing. */
516 tr->ctrl = 0; 582 tracing_stop();
517 trace->ctrl_update(tr);
518 /* check the trace buffer */ 583 /* check the trace buffer */
519 ret = trace_test_buffer(tr, &count); 584 ret = trace_test_buffer(tr, &count);
520 trace->reset(tr); 585 trace->reset(tr);
586 tracing_start();
521 587
522 return ret; 588 return ret;
523} 589}
524#endif /* CONFIG_SYSPROF_TRACER */ 590#endif /* CONFIG_SYSPROF_TRACER */
591
592#ifdef CONFIG_BRANCH_TRACER
593int
594trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
595{
596 unsigned long count;
597 int ret;
598
599 /* start the tracing */
600 ret = trace->init(tr);
601 if (ret) {
602 warn_failed_init_tracer(trace, ret);
603 return ret;
604 }
605
606 /* Sleep for a 1/10 of a second */
607 msleep(100);
608 /* stop the tracing. */
609 tracing_stop();
610 /* check the trace buffer */
611 ret = trace_test_buffer(tr, &count);
612 trace->reset(tr);
613 tracing_start();
614
615 return ret;
616}
617#endif /* CONFIG_BRANCH_TRACER */
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index be682b62fe58..d0871bc0aca5 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -10,6 +10,7 @@
10#include <linux/debugfs.h> 10#include <linux/debugfs.h>
11#include <linux/ftrace.h> 11#include <linux/ftrace.h>
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/sysctl.h>
13#include <linux/init.h> 14#include <linux/init.h>
14#include <linux/fs.h> 15#include <linux/fs.h>
15#include "trace.h" 16#include "trace.h"
@@ -31,6 +32,10 @@ static raw_spinlock_t max_stack_lock =
31 32
32static int stack_trace_disabled __read_mostly; 33static int stack_trace_disabled __read_mostly;
33static DEFINE_PER_CPU(int, trace_active); 34static DEFINE_PER_CPU(int, trace_active);
35static DEFINE_MUTEX(stack_sysctl_mutex);
36
37int stack_tracer_enabled;
38static int last_stack_tracer_enabled;
34 39
35static inline void check_stack(void) 40static inline void check_stack(void)
36{ 41{
@@ -48,7 +53,7 @@ static inline void check_stack(void)
48 if (!object_is_on_stack(&this_size)) 53 if (!object_is_on_stack(&this_size))
49 return; 54 return;
50 55
51 raw_local_irq_save(flags); 56 local_irq_save(flags);
52 __raw_spin_lock(&max_stack_lock); 57 __raw_spin_lock(&max_stack_lock);
53 58
54 /* a race could have already updated it */ 59 /* a race could have already updated it */
@@ -78,6 +83,7 @@ static inline void check_stack(void)
78 * on a new max, so it is far from a fast path. 83 * on a new max, so it is far from a fast path.
79 */ 84 */
80 while (i < max_stack_trace.nr_entries) { 85 while (i < max_stack_trace.nr_entries) {
86 int found = 0;
81 87
82 stack_dump_index[i] = this_size; 88 stack_dump_index[i] = this_size;
83 p = start; 89 p = start;
@@ -86,17 +92,19 @@ static inline void check_stack(void)
86 if (*p == stack_dump_trace[i]) { 92 if (*p == stack_dump_trace[i]) {
87 this_size = stack_dump_index[i++] = 93 this_size = stack_dump_index[i++] =
88 (top - p) * sizeof(unsigned long); 94 (top - p) * sizeof(unsigned long);
95 found = 1;
89 /* Start the search from here */ 96 /* Start the search from here */
90 start = p + 1; 97 start = p + 1;
91 } 98 }
92 } 99 }
93 100
94 i++; 101 if (!found)
102 i++;
95 } 103 }
96 104
97 out: 105 out:
98 __raw_spin_unlock(&max_stack_lock); 106 __raw_spin_unlock(&max_stack_lock);
99 raw_local_irq_restore(flags); 107 local_irq_restore(flags);
100} 108}
101 109
102static void 110static void
@@ -107,8 +115,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
107 if (unlikely(!ftrace_enabled || stack_trace_disabled)) 115 if (unlikely(!ftrace_enabled || stack_trace_disabled))
108 return; 116 return;
109 117
110 resched = need_resched(); 118 resched = ftrace_preempt_disable();
111 preempt_disable_notrace();
112 119
113 cpu = raw_smp_processor_id(); 120 cpu = raw_smp_processor_id();
114 /* no atomic needed, we only modify this variable by this cpu */ 121 /* no atomic needed, we only modify this variable by this cpu */
@@ -120,10 +127,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
120 out: 127 out:
121 per_cpu(trace_active, cpu)--; 128 per_cpu(trace_active, cpu)--;
122 /* prevent recursion in schedule */ 129 /* prevent recursion in schedule */
123 if (resched) 130 ftrace_preempt_enable(resched);
124 preempt_enable_no_resched_notrace();
125 else
126 preempt_enable_notrace();
127} 131}
128 132
129static struct ftrace_ops trace_ops __read_mostly = 133static struct ftrace_ops trace_ops __read_mostly =
@@ -166,16 +170,16 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
166 if (ret < 0) 170 if (ret < 0)
167 return ret; 171 return ret;
168 172
169 raw_local_irq_save(flags); 173 local_irq_save(flags);
170 __raw_spin_lock(&max_stack_lock); 174 __raw_spin_lock(&max_stack_lock);
171 *ptr = val; 175 *ptr = val;
172 __raw_spin_unlock(&max_stack_lock); 176 __raw_spin_unlock(&max_stack_lock);
173 raw_local_irq_restore(flags); 177 local_irq_restore(flags);
174 178
175 return count; 179 return count;
176} 180}
177 181
178static struct file_operations stack_max_size_fops = { 182static const struct file_operations stack_max_size_fops = {
179 .open = tracing_open_generic, 183 .open = tracing_open_generic,
180 .read = stack_max_size_read, 184 .read = stack_max_size_read,
181 .write = stack_max_size_write, 185 .write = stack_max_size_write,
@@ -184,11 +188,16 @@ static struct file_operations stack_max_size_fops = {
184static void * 188static void *
185t_next(struct seq_file *m, void *v, loff_t *pos) 189t_next(struct seq_file *m, void *v, loff_t *pos)
186{ 190{
187 long i = (long)m->private; 191 long i;
188 192
189 (*pos)++; 193 (*pos)++;
190 194
191 i++; 195 if (v == SEQ_START_TOKEN)
196 i = 0;
197 else {
198 i = *(long *)v;
199 i++;
200 }
192 201
193 if (i >= max_stack_trace.nr_entries || 202 if (i >= max_stack_trace.nr_entries ||
194 stack_dump_trace[i] == ULONG_MAX) 203 stack_dump_trace[i] == ULONG_MAX)
@@ -201,12 +210,15 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
201 210
202static void *t_start(struct seq_file *m, loff_t *pos) 211static void *t_start(struct seq_file *m, loff_t *pos)
203{ 212{
204 void *t = &m->private; 213 void *t = SEQ_START_TOKEN;
205 loff_t l = 0; 214 loff_t l = 0;
206 215
207 local_irq_disable(); 216 local_irq_disable();
208 __raw_spin_lock(&max_stack_lock); 217 __raw_spin_lock(&max_stack_lock);
209 218
219 if (*pos == 0)
220 return SEQ_START_TOKEN;
221
210 for (; t && l < *pos; t = t_next(m, t, &l)) 222 for (; t && l < *pos; t = t_next(m, t, &l))
211 ; 223 ;
212 224
@@ -235,10 +247,10 @@ static int trace_lookup_stack(struct seq_file *m, long i)
235 247
236static int t_show(struct seq_file *m, void *v) 248static int t_show(struct seq_file *m, void *v)
237{ 249{
238 long i = *(long *)v; 250 long i;
239 int size; 251 int size;
240 252
241 if (i < 0) { 253 if (v == SEQ_START_TOKEN) {
242 seq_printf(m, " Depth Size Location" 254 seq_printf(m, " Depth Size Location"
243 " (%d entries)\n" 255 " (%d entries)\n"
244 " ----- ---- --------\n", 256 " ----- ---- --------\n",
@@ -246,6 +258,8 @@ static int t_show(struct seq_file *m, void *v)
246 return 0; 258 return 0;
247 } 259 }
248 260
261 i = *(long *)v;
262
249 if (i >= max_stack_trace.nr_entries || 263 if (i >= max_stack_trace.nr_entries ||
250 stack_dump_trace[i] == ULONG_MAX) 264 stack_dump_trace[i] == ULONG_MAX)
251 return 0; 265 return 0;
@@ -263,7 +277,7 @@ static int t_show(struct seq_file *m, void *v)
263 return 0; 277 return 0;
264} 278}
265 279
266static struct seq_operations stack_trace_seq_ops = { 280static const struct seq_operations stack_trace_seq_ops = {
267 .start = t_start, 281 .start = t_start,
268 .next = t_next, 282 .next = t_next,
269 .stop = t_stop, 283 .stop = t_stop,
@@ -275,20 +289,51 @@ static int stack_trace_open(struct inode *inode, struct file *file)
275 int ret; 289 int ret;
276 290
277 ret = seq_open(file, &stack_trace_seq_ops); 291 ret = seq_open(file, &stack_trace_seq_ops);
278 if (!ret) {
279 struct seq_file *m = file->private_data;
280 m->private = (void *)-1;
281 }
282 292
283 return ret; 293 return ret;
284} 294}
285 295
286static struct file_operations stack_trace_fops = { 296static const struct file_operations stack_trace_fops = {
287 .open = stack_trace_open, 297 .open = stack_trace_open,
288 .read = seq_read, 298 .read = seq_read,
289 .llseek = seq_lseek, 299 .llseek = seq_lseek,
290}; 300};
291 301
302int
303stack_trace_sysctl(struct ctl_table *table, int write,
304 struct file *file, void __user *buffer, size_t *lenp,
305 loff_t *ppos)
306{
307 int ret;
308
309 mutex_lock(&stack_sysctl_mutex);
310
311 ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
312
313 if (ret || !write ||
314 (last_stack_tracer_enabled == stack_tracer_enabled))
315 goto out;
316
317 last_stack_tracer_enabled = stack_tracer_enabled;
318
319 if (stack_tracer_enabled)
320 register_ftrace_function(&trace_ops);
321 else
322 unregister_ftrace_function(&trace_ops);
323
324 out:
325 mutex_unlock(&stack_sysctl_mutex);
326 return ret;
327}
328
329static __init int enable_stacktrace(char *str)
330{
331 stack_tracer_enabled = 1;
332 last_stack_tracer_enabled = 1;
333 return 1;
334}
335__setup("stacktrace", enable_stacktrace);
336
292static __init int stack_trace_init(void) 337static __init int stack_trace_init(void)
293{ 338{
294 struct dentry *d_tracer; 339 struct dentry *d_tracer;
@@ -306,7 +351,8 @@ static __init int stack_trace_init(void)
306 if (!entry) 351 if (!entry)
307 pr_warning("Could not create debugfs 'stack_trace' entry\n"); 352 pr_warning("Could not create debugfs 'stack_trace' entry\n");
308 353
309 register_ftrace_function(&trace_ops); 354 if (stack_tracer_enabled)
355 register_ftrace_function(&trace_ops);
310 356
311 return 0; 357 return 0;
312} 358}
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index 9587d3bcba55..a5779bd975db 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -202,7 +202,6 @@ static void start_stack_timer(int cpu)
202 202
203 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 203 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
204 hrtimer->function = stack_trace_timer_fn; 204 hrtimer->function = stack_trace_timer_fn;
205 hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
206 205
207 hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL); 206 hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL);
208} 207}
@@ -234,20 +233,10 @@ static void stop_stack_timers(void)
234 stop_stack_timer(cpu); 233 stop_stack_timer(cpu);
235} 234}
236 235
237static void stack_reset(struct trace_array *tr)
238{
239 int cpu;
240
241 tr->time_start = ftrace_now(tr->cpu);
242
243 for_each_online_cpu(cpu)
244 tracing_reset(tr, cpu);
245}
246
247static void start_stack_trace(struct trace_array *tr) 236static void start_stack_trace(struct trace_array *tr)
248{ 237{
249 mutex_lock(&sample_timer_lock); 238 mutex_lock(&sample_timer_lock);
250 stack_reset(tr); 239 tracing_reset_online_cpus(tr);
251 start_stack_timers(); 240 start_stack_timers();
252 tracer_enabled = 1; 241 tracer_enabled = 1;
253 mutex_unlock(&sample_timer_lock); 242 mutex_unlock(&sample_timer_lock);
@@ -261,27 +250,17 @@ static void stop_stack_trace(struct trace_array *tr)
261 mutex_unlock(&sample_timer_lock); 250 mutex_unlock(&sample_timer_lock);
262} 251}
263 252
264static void stack_trace_init(struct trace_array *tr) 253static int stack_trace_init(struct trace_array *tr)
265{ 254{
266 sysprof_trace = tr; 255 sysprof_trace = tr;
267 256
268 if (tr->ctrl) 257 start_stack_trace(tr);
269 start_stack_trace(tr); 258 return 0;
270} 259}
271 260
272static void stack_trace_reset(struct trace_array *tr) 261static void stack_trace_reset(struct trace_array *tr)
273{ 262{
274 if (tr->ctrl) 263 stop_stack_trace(tr);
275 stop_stack_trace(tr);
276}
277
278static void stack_trace_ctrl_update(struct trace_array *tr)
279{
280 /* When starting a new trace, reset the buffers */
281 if (tr->ctrl)
282 start_stack_trace(tr);
283 else
284 stop_stack_trace(tr);
285} 264}
286 265
287static struct tracer stack_trace __read_mostly = 266static struct tracer stack_trace __read_mostly =
@@ -289,7 +268,6 @@ static struct tracer stack_trace __read_mostly =
289 .name = "sysprof", 268 .name = "sysprof",
290 .init = stack_trace_init, 269 .init = stack_trace_init,
291 .reset = stack_trace_reset, 270 .reset = stack_trace_reset,
292 .ctrl_update = stack_trace_ctrl_update,
293#ifdef CONFIG_FTRACE_SELFTEST 271#ifdef CONFIG_FTRACE_SELFTEST
294 .selftest = trace_selftest_startup_sysprof, 272 .selftest = trace_selftest_startup_sysprof,
295#endif 273#endif