author		Linus Torvalds <torvalds@linux-foundation.org>	2014-06-09 19:39:15 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-09 19:39:15 -0400
commit		214b93132023cc9305d5801add812515bea4d7d0 (patch)
tree		bb8db8677dd80b6ef570b8aa59475b072b81db11
parent		14208b0ec56919f5333dd654b1a7d10765d0ad05 (diff)
parent		a9fcaaac37b3baba1343f906f52aeb65c4d4e356 (diff)
Merge tag 'trace-3.16' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
 "Lots of tweaks, small fixes, optimizations, and some helper functions
  to help out the rest of the kernel to ease their use of trace events.

  The big change for this release is the allowing of other tracers,
  such as the latency tracers, to be used in the trace instances and
  allow for function or function graph tracing to be in the top level
  simultaneously"

* tag 'trace-3.16' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (44 commits)
  tracing: Fix memory leak on instance deletion
  tracing: Fix leak of ring buffer data when new instances creation fails
  tracing/kprobes: Avoid self tests if tracing is disabled on boot up
  tracing: Return error if ftrace_trace_arrays list is empty
  tracing: Only calculate stats of tracepoint benchmarks for 2^32 times
  tracing: Convert stddev into u64 in tracepoint benchmark
  tracing: Introduce saved_cmdlines_size file
  tracing: Add __get_dynamic_array_len() macro for trace events
  tracing: Remove unused variable in trace_benchmark
  tracing: Eliminate double free on failure of allocation on boot up
  ftrace/x86: Call text_ip_addr() instead of the duplicated code
  tracing: Print max callstack on stacktrace bug
  tracing: Move locking of trace_cmdline_lock into start/stop seq calls
  tracing: Try again for saved cmdline if failed due to locking
  tracing: Have saved_cmdlines use the seq_read infrastructure
  tracing: Add tracepoint benchmark tracepoint
  tracing: Print nasty banner when trace_printk() is in use
  tracing: Add funcgraph_tail option to print function name after closing braces
  tracing: Eliminate duplicate TRACE_GRAPH_PRINT_xx defines
  tracing: Add __bitmask() macro to trace events to cpumasks and other bitmasks
  ...
Diffstat:
 Documentation/trace/ftrace.txt        |  26
 Documentation/trace/tracepoints.txt   |  24
 MAINTAINERS                           |   1
 arch/x86/kernel/Makefile              |   1
 arch/x86/kernel/entry_64.S            | 204
 arch/x86/kernel/ftrace.c              |  56
 arch/x86/kernel/mcount_64.S           | 217
 include/linux/ftrace.h                |  24
 include/linux/ftrace_event.h          |   3
 include/linux/kprobes.h               |   2
 include/linux/trace_seq.h             |  10
 include/linux/tracepoint.h            |  10
 include/trace/ftrace.h                |  66
 kernel/trace/Kconfig                  |  30
 kernel/trace/Makefile                 |   3
 kernel/trace/ftrace.c                 | 267
 kernel/trace/trace.c                  | 441
 kernel/trace/trace.h                  |  46
 kernel/trace/trace_benchmark.c        | 198
 kernel/trace/trace_benchmark.h        |  41
 kernel/trace/trace_events.c           |  13
 kernel/trace/trace_functions.c        |  56
 kernel/trace/trace_functions_graph.c  |  19
 kernel/trace/trace_irqsoff.c          |  71
 kernel/trace/trace_kprobe.c           |   3
 kernel/trace/trace_nop.c              |   1
 kernel/trace/trace_output.c           |  41
 kernel/trace/trace_sched_wakeup.c     |  70
 kernel/trace/trace_selftest.c         |  69
 kernel/trace/trace_stack.c            |  42
 30 files changed, 1326 insertions, 729 deletions
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index bd365988e8d8..2479b2a0c77c 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -2003,6 +2003,32 @@ want, depending on your needs.
  360.774530 |   1)   0.594 us    |            __phys_addr();
 
 
+The function name is always displayed after the closing bracket
+for a function if the start of that function is not in the
+trace buffer.
+
+Display of the function name after the closing bracket may be
+enabled for functions whose start is in the trace buffer,
+allowing easier searching with grep for function durations.
+It is default disabled.
+
+	hide: echo nofuncgraph-tail > trace_options
+	show: echo funcgraph-tail > trace_options
+
+  Example with nofuncgraph-tail (default):
+  0)               |      putname() {
+  0)               |        kmem_cache_free() {
+  0)   0.518 us    |          __phys_addr();
+  0)   1.757 us    |        }
+  0)   2.861 us    |      }
+
+  Example with funcgraph-tail:
+  0)               |      putname() {
+  0)               |        kmem_cache_free() {
+  0)   0.518 us    |          __phys_addr();
+  0)   1.757 us    |        } /* kmem_cache_free() */
+  0)   2.861 us    |      } /* putname() */
+
 You can put some comments on specific functions by using
 trace_printk() For example, if you want to put a comment inside
 the __might_sleep() function, you just have to include
diff --git a/Documentation/trace/tracepoints.txt b/Documentation/trace/tracepoints.txt
index 6b018b53177a..a3efac621c5a 100644
--- a/Documentation/trace/tracepoints.txt
+++ b/Documentation/trace/tracepoints.txt
@@ -115,6 +115,30 @@ If the tracepoint has to be used in kernel modules, an
 EXPORT_TRACEPOINT_SYMBOL_GPL() or EXPORT_TRACEPOINT_SYMBOL() can be
 used to export the defined tracepoints.
 
+If you need to do a bit of work for a tracepoint parameter, and
+that work is only used for the tracepoint, that work can be encapsulated
+within an if statement with the following:
+
+	if (trace_foo_bar_enabled()) {
+		int i;
+		int tot = 0;
+
+		for (i = 0; i < count; i++)
+			tot += calculate_nuggets();
+
+		trace_foo_bar(tot);
+	}
+
+All trace_<tracepoint>() calls have a matching trace_<tracepoint>_enabled()
+function defined that returns true if the tracepoint is enabled and
+false otherwise. The trace_<tracepoint>() should always be within the
+block of the if (trace_<tracepoint>_enabled()) to prevent races between
+the tracepoint being enabled and the check being seen.
+
+The advantage of using the trace_<tracepoint>_enabled() is that it uses
+the static_key of the tracepoint to allow the if statement to be implemented
+with jump labels and avoid conditional branches.
+
 Note: The convenience macro TRACE_EVENT provides an alternative way to
       define tracepoints. Check http://lwn.net/Articles/379903,
       http://lwn.net/Articles/381064 and http://lwn.net/Articles/383362
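For context, both trace_foo_bar() and trace_foo_bar_enabled() in the documentation hunk above are generated from a single event definition. A minimal sketch, assuming a hypothetical foo_bar event that records the computed total (the event name, field, and header path are illustrative, not part of this merge):

    /* Hypothetical event header, e.g. include/trace/events/foo.h */
    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM foo

    #if !defined(_TRACE_FOO_H) || defined(TRACE_HEADER_MULTI_READ)
    #define _TRACE_FOO_H

    #include <linux/tracepoint.h>

    TRACE_EVENT(foo_bar,

            TP_PROTO(int tot),

            TP_ARGS(tot),

            TP_STRUCT__entry(
                    __field(int, tot)       /* total computed for the event */
            ),

            TP_fast_assign(
                    __entry->tot = tot;
            ),

            TP_printk("tot=%d", __entry->tot)
    );

    #endif /* _TRACE_FOO_H */

    /* This part must be outside protection */
    #include <trace/define_trace.h>

Defining the event this way is what makes both trace_foo_bar() and the trace_foo_bar_enabled() guard shown in the hunk available to callers.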
diff --git a/MAINTAINERS b/MAINTAINERS
index 0fbd4a04407b..454c054b1790 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9106,7 +9106,6 @@ F:	drivers/char/tpm/
 
 TRACING
 M:	Steven Rostedt <rostedt@goodmis.org>
-M:	Frederic Weisbecker <fweisbec@gmail.com>
 M:	Ingo Molnar <mingo@redhat.com>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
 S:	Maintained
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 491ef3e59850..047f9ff2e36c 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_IRQ_WORK) += irq_work.o
 obj-y			+= probe_roms.o
 obj-$(CONFIG_X86_32)	+= i386_ksyms_32.o
 obj-$(CONFIG_X86_64)	+= sys_x86_64.o x8664_ksyms_64.o
+obj-$(CONFIG_X86_64)	+= mcount_64.o
 obj-y			+= syscall_$(BITS).o vsyscall_gtod.o
 obj-$(CONFIG_X86_64)	+= vsyscall_64.o
 obj-$(CONFIG_X86_64)	+= vsyscall_emu_64.o
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 96987987c5de..48a2644a082a 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -53,7 +53,6 @@
 #include <asm/page_types.h>
 #include <asm/irqflags.h>
 #include <asm/paravirt.h>
-#include <asm/ftrace.h>
 #include <asm/percpu.h>
 #include <asm/asm.h>
 #include <asm/context_tracking.h>
@@ -70,209 +69,6 @@
 	.code64
 	.section .entry.text, "ax"
 
-#ifdef CONFIG_FUNCTION_TRACER
-
-#ifdef CC_USING_FENTRY
-# define function_hook	__fentry__
-#else
-# define function_hook	mcount
-#endif
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-
-ENTRY(function_hook)
-	retq
-END(function_hook)
-
-/* skip is set if stack has been adjusted */
-.macro ftrace_caller_setup skip=0
-	MCOUNT_SAVE_FRAME \skip
-
-	/* Load the ftrace_ops into the 3rd parameter */
-	movq function_trace_op(%rip), %rdx
-
-	/* Load ip into the first parameter */
-	movq RIP(%rsp), %rdi
-	subq $MCOUNT_INSN_SIZE, %rdi
-	/* Load the parent_ip into the second parameter */
-#ifdef CC_USING_FENTRY
-	movq SS+16(%rsp), %rsi
-#else
-	movq 8(%rbp), %rsi
-#endif
-.endm
-
-ENTRY(ftrace_caller)
-	/* Check if tracing was disabled (quick check) */
-	cmpl $0, function_trace_stop
-	jne  ftrace_stub
-
-	ftrace_caller_setup
-	/* regs go into 4th parameter (but make it NULL) */
-	movq $0, %rcx
-
-GLOBAL(ftrace_call)
-	call ftrace_stub
-
-	MCOUNT_RESTORE_FRAME
-ftrace_return:
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-GLOBAL(ftrace_graph_call)
-	jmp ftrace_stub
-#endif
-
-GLOBAL(ftrace_stub)
-	retq
-END(ftrace_caller)
-
-ENTRY(ftrace_regs_caller)
-	/* Save the current flags before compare (in SS location)*/
-	pushfq
-
-	/* Check if tracing was disabled (quick check) */
-	cmpl $0, function_trace_stop
-	jne  ftrace_restore_flags
-
-	/* skip=8 to skip flags saved in SS */
-	ftrace_caller_setup 8
-
-	/* Save the rest of pt_regs */
-	movq %r15, R15(%rsp)
-	movq %r14, R14(%rsp)
-	movq %r13, R13(%rsp)
-	movq %r12, R12(%rsp)
-	movq %r11, R11(%rsp)
-	movq %r10, R10(%rsp)
-	movq %rbp, RBP(%rsp)
-	movq %rbx, RBX(%rsp)
-	/* Copy saved flags */
-	movq SS(%rsp), %rcx
-	movq %rcx, EFLAGS(%rsp)
-	/* Kernel segments */
-	movq $__KERNEL_DS, %rcx
-	movq %rcx, SS(%rsp)
-	movq $__KERNEL_CS, %rcx
-	movq %rcx, CS(%rsp)
-	/* Stack - skipping return address */
-	leaq SS+16(%rsp), %rcx
-	movq %rcx, RSP(%rsp)
-
-	/* regs go into 4th parameter */
-	leaq (%rsp), %rcx
-
-GLOBAL(ftrace_regs_call)
-	call ftrace_stub
-
-	/* Copy flags back to SS, to restore them */
-	movq EFLAGS(%rsp), %rax
-	movq %rax, SS(%rsp)
-
-	/* Handlers can change the RIP */
-	movq RIP(%rsp), %rax
-	movq %rax, SS+8(%rsp)
-
-	/* restore the rest of pt_regs */
-	movq R15(%rsp), %r15
-	movq R14(%rsp), %r14
-	movq R13(%rsp), %r13
-	movq R12(%rsp), %r12
-	movq R10(%rsp), %r10
-	movq RBP(%rsp), %rbp
-	movq RBX(%rsp), %rbx
-
-	/* skip=8 to skip flags saved in SS */
-	MCOUNT_RESTORE_FRAME 8
-
-	/* Restore flags */
-	popfq
-
-	jmp ftrace_return
-ftrace_restore_flags:
-	popfq
-	jmp  ftrace_stub
-
-END(ftrace_regs_caller)
-
-
-#else /* ! CONFIG_DYNAMIC_FTRACE */
-
-ENTRY(function_hook)
-	cmpl $0, function_trace_stop
-	jne  ftrace_stub
-
-	cmpq $ftrace_stub, ftrace_trace_function
-	jnz trace
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	cmpq $ftrace_stub, ftrace_graph_return
-	jnz ftrace_graph_caller
-
-	cmpq $ftrace_graph_entry_stub, ftrace_graph_entry
-	jnz ftrace_graph_caller
-#endif
-
-GLOBAL(ftrace_stub)
-	retq
-
-trace:
-	MCOUNT_SAVE_FRAME
-
-	movq RIP(%rsp), %rdi
-#ifdef CC_USING_FENTRY
-	movq SS+16(%rsp), %rsi
-#else
-	movq 8(%rbp), %rsi
-#endif
-	subq $MCOUNT_INSN_SIZE, %rdi
-
-	call *ftrace_trace_function
-
-	MCOUNT_RESTORE_FRAME
-
-	jmp ftrace_stub
-END(function_hook)
-#endif /* CONFIG_DYNAMIC_FTRACE */
-#endif /* CONFIG_FUNCTION_TRACER */
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(ftrace_graph_caller)
-	MCOUNT_SAVE_FRAME
-
-#ifdef CC_USING_FENTRY
-	leaq SS+16(%rsp), %rdi
-	movq $0, %rdx	/* No framepointers needed */
-#else
-	leaq 8(%rbp), %rdi
-	movq (%rbp), %rdx
-#endif
-	movq RIP(%rsp), %rsi
-	subq $MCOUNT_INSN_SIZE, %rsi
-
-	call prepare_ftrace_return
-
-	MCOUNT_RESTORE_FRAME
-
-	retq
-END(ftrace_graph_caller)
-
-GLOBAL(return_to_handler)
-	subq $24, %rsp
-
-	/* Save the return values */
-	movq %rax, (%rsp)
-	movq %rdx, 8(%rsp)
-	movq %rbp, %rdi
-
-	call ftrace_return_to_handler
-
-	movq %rax, %rdi
-	movq 8(%rsp), %rdx
-	movq (%rsp), %rax
-	addq $24, %rsp
-	jmp *%rdi
-#endif
-
 
 #ifndef CONFIG_PREEMPT
 #define retint_kernel retint_restore_args
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 52819e816f87..cbc4a91b131e 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -297,16 +297,7 @@ int ftrace_int3_handler(struct pt_regs *regs)
 
 static int ftrace_write(unsigned long ip, const char *val, int size)
 {
-	/*
-	 * On x86_64, kernel text mappings are mapped read-only with
-	 * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead
-	 * of the kernel text mapping to modify the kernel text.
-	 *
-	 * For 32bit kernels, these mappings are same and we can use
-	 * kernel identity mapping to modify code.
-	 */
-	if (within(ip, (unsigned long)_text, (unsigned long)_etext))
-		ip = (unsigned long)__va(__pa_symbol(ip));
+	ip = text_ip_addr(ip);
 
 	if (probe_kernel_write((void *)ip, val, size))
 		return -EPERM;
@@ -349,40 +340,14 @@ static int add_brk_on_nop(struct dyn_ftrace *rec)
 	return add_break(rec->ip, old);
 }
 
-/*
- * If the record has the FTRACE_FL_REGS set, that means that it
- * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
- * is not not set, then it wants to convert to the normal callback.
- */
-static unsigned long get_ftrace_addr(struct dyn_ftrace *rec)
-{
-	if (rec->flags & FTRACE_FL_REGS)
-		return (unsigned long)FTRACE_REGS_ADDR;
-	else
-		return (unsigned long)FTRACE_ADDR;
-}
-
-/*
- * The FTRACE_FL_REGS_EN is set when the record already points to
- * a function that saves all the regs. Basically the '_EN' version
- * represents the current state of the function.
- */
-static unsigned long get_ftrace_old_addr(struct dyn_ftrace *rec)
-{
-	if (rec->flags & FTRACE_FL_REGS_EN)
-		return (unsigned long)FTRACE_REGS_ADDR;
-	else
-		return (unsigned long)FTRACE_ADDR;
-}
-
 static int add_breakpoints(struct dyn_ftrace *rec, int enable)
 {
 	unsigned long ftrace_addr;
 	int ret;
 
-	ret = ftrace_test_record(rec, enable);
+	ftrace_addr = ftrace_get_addr_curr(rec);
 
-	ftrace_addr = get_ftrace_addr(rec);
+	ret = ftrace_test_record(rec, enable);
 
 	switch (ret) {
 	case FTRACE_UPDATE_IGNORE:
@@ -392,10 +357,7 @@ static int add_breakpoints(struct dyn_ftrace *rec, int enable)
 		/* converting nop to call */
 		return add_brk_on_nop(rec);
 
-	case FTRACE_UPDATE_MODIFY_CALL_REGS:
 	case FTRACE_UPDATE_MODIFY_CALL:
-		ftrace_addr = get_ftrace_old_addr(rec);
-		/* fall through */
 	case FTRACE_UPDATE_MAKE_NOP:
 		/* converting a call to a nop */
 		return add_brk_on_call(rec, ftrace_addr);
@@ -440,14 +402,14 @@ static int remove_breakpoint(struct dyn_ftrace *rec)
 	 * If not, don't touch the breakpoint, we make just create
 	 * a disaster.
 	 */
-	ftrace_addr = get_ftrace_addr(rec);
+	ftrace_addr = ftrace_get_addr_new(rec);
 	nop = ftrace_call_replace(ip, ftrace_addr);
 
 	if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) == 0)
 		goto update;
 
 	/* Check both ftrace_addr and ftrace_old_addr */
-	ftrace_addr = get_ftrace_old_addr(rec);
+	ftrace_addr = ftrace_get_addr_curr(rec);
 	nop = ftrace_call_replace(ip, ftrace_addr);
 
 	if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
@@ -491,13 +453,12 @@ static int add_update(struct dyn_ftrace *rec, int enable)
 
 	ret = ftrace_test_record(rec, enable);
 
-	ftrace_addr = get_ftrace_addr(rec);
+	ftrace_addr = ftrace_get_addr_new(rec);
 
 	switch (ret) {
 	case FTRACE_UPDATE_IGNORE:
 		return 0;
 
-	case FTRACE_UPDATE_MODIFY_CALL_REGS:
 	case FTRACE_UPDATE_MODIFY_CALL:
 	case FTRACE_UPDATE_MAKE_CALL:
 		/* converting nop to call */
@@ -538,13 +499,12 @@ static int finish_update(struct dyn_ftrace *rec, int enable)
 
 	ret = ftrace_update_record(rec, enable);
 
-	ftrace_addr = get_ftrace_addr(rec);
+	ftrace_addr = ftrace_get_addr_new(rec);
 
 	switch (ret) {
 	case FTRACE_UPDATE_IGNORE:
 		return 0;
 
-	case FTRACE_UPDATE_MODIFY_CALL_REGS:
 	case FTRACE_UPDATE_MODIFY_CALL:
 	case FTRACE_UPDATE_MAKE_CALL:
 		/* converting nop to call */
@@ -621,8 +581,8 @@ void ftrace_replace_code(int enable)
 		return;
 
  remove_breakpoints:
+	pr_warn("Failed on %s (%d):\n", report, count);
 	ftrace_bug(ret, rec ? rec->ip : 0);
-	printk(KERN_WARNING "Failed on %s (%d):\n", report, count);
 	for_ftrace_rec_iter(iter) {
 		rec = ftrace_rec_iter_record(iter);
 		/*
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
new file mode 100644
index 000000000000..c050a0153168
--- /dev/null
+++ b/arch/x86/kernel/mcount_64.S
@@ -0,0 +1,217 @@
+/*
+ *  linux/arch/x86_64/mcount_64.S
+ *
+ *  Copyright (C) 2014  Steven Rostedt, Red Hat Inc
+ */
+
+#include <linux/linkage.h>
+#include <asm/ptrace.h>
+#include <asm/ftrace.h>
+
+
+	.code64
+	.section .entry.text, "ax"
+
+
+#ifdef CONFIG_FUNCTION_TRACER
+
+#ifdef CC_USING_FENTRY
+# define function_hook	__fentry__
+#else
+# define function_hook	mcount
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+ENTRY(function_hook)
+	retq
+END(function_hook)
+
+/* skip is set if stack has been adjusted */
+.macro ftrace_caller_setup skip=0
+	MCOUNT_SAVE_FRAME \skip
+
+	/* Load the ftrace_ops into the 3rd parameter */
+	movq function_trace_op(%rip), %rdx
+
+	/* Load ip into the first parameter */
+	movq RIP(%rsp), %rdi
+	subq $MCOUNT_INSN_SIZE, %rdi
+	/* Load the parent_ip into the second parameter */
+#ifdef CC_USING_FENTRY
+	movq SS+16(%rsp), %rsi
+#else
+	movq 8(%rbp), %rsi
+#endif
+.endm
+
+ENTRY(ftrace_caller)
+	/* Check if tracing was disabled (quick check) */
+	cmpl $0, function_trace_stop
+	jne  ftrace_stub
+
+	ftrace_caller_setup
+	/* regs go into 4th parameter (but make it NULL) */
+	movq $0, %rcx
+
+GLOBAL(ftrace_call)
+	call ftrace_stub
+
+	MCOUNT_RESTORE_FRAME
+ftrace_return:
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+GLOBAL(ftrace_graph_call)
+	jmp ftrace_stub
+#endif
+
+GLOBAL(ftrace_stub)
+	retq
+END(ftrace_caller)
+
+ENTRY(ftrace_regs_caller)
+	/* Save the current flags before compare (in SS location)*/
+	pushfq
+
+	/* Check if tracing was disabled (quick check) */
+	cmpl $0, function_trace_stop
+	jne  ftrace_restore_flags
+
+	/* skip=8 to skip flags saved in SS */
+	ftrace_caller_setup 8
+
+	/* Save the rest of pt_regs */
+	movq %r15, R15(%rsp)
+	movq %r14, R14(%rsp)
+	movq %r13, R13(%rsp)
+	movq %r12, R12(%rsp)
+	movq %r11, R11(%rsp)
+	movq %r10, R10(%rsp)
+	movq %rbp, RBP(%rsp)
+	movq %rbx, RBX(%rsp)
+	/* Copy saved flags */
+	movq SS(%rsp), %rcx
+	movq %rcx, EFLAGS(%rsp)
+	/* Kernel segments */
+	movq $__KERNEL_DS, %rcx
+	movq %rcx, SS(%rsp)
+	movq $__KERNEL_CS, %rcx
+	movq %rcx, CS(%rsp)
+	/* Stack - skipping return address */
+	leaq SS+16(%rsp), %rcx
+	movq %rcx, RSP(%rsp)
+
+	/* regs go into 4th parameter */
+	leaq (%rsp), %rcx
+
+GLOBAL(ftrace_regs_call)
+	call ftrace_stub
+
+	/* Copy flags back to SS, to restore them */
+	movq EFLAGS(%rsp), %rax
+	movq %rax, SS(%rsp)
+
+	/* Handlers can change the RIP */
+	movq RIP(%rsp), %rax
+	movq %rax, SS+8(%rsp)
+
+	/* restore the rest of pt_regs */
+	movq R15(%rsp), %r15
+	movq R14(%rsp), %r14
+	movq R13(%rsp), %r13
+	movq R12(%rsp), %r12
+	movq R10(%rsp), %r10
+	movq RBP(%rsp), %rbp
+	movq RBX(%rsp), %rbx
+
+	/* skip=8 to skip flags saved in SS */
+	MCOUNT_RESTORE_FRAME 8
+
+	/* Restore flags */
+	popfq
+
+	jmp ftrace_return
+ftrace_restore_flags:
+	popfq
+	jmp  ftrace_stub
+
+END(ftrace_regs_caller)
+
+
+#else /* ! CONFIG_DYNAMIC_FTRACE */
+
+ENTRY(function_hook)
+	cmpl $0, function_trace_stop
+	jne  ftrace_stub
+
+	cmpq $ftrace_stub, ftrace_trace_function
+	jnz trace
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	cmpq $ftrace_stub, ftrace_graph_return
+	jnz ftrace_graph_caller
+
+	cmpq $ftrace_graph_entry_stub, ftrace_graph_entry
+	jnz ftrace_graph_caller
+#endif
+
+GLOBAL(ftrace_stub)
+	retq
+
+trace:
+	MCOUNT_SAVE_FRAME
+
+	movq RIP(%rsp), %rdi
+#ifdef CC_USING_FENTRY
+	movq SS+16(%rsp), %rsi
+#else
+	movq 8(%rbp), %rsi
+#endif
+	subq $MCOUNT_INSN_SIZE, %rdi
+
+	call *ftrace_trace_function
+
+	MCOUNT_RESTORE_FRAME
+
+	jmp ftrace_stub
+END(function_hook)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+	MCOUNT_SAVE_FRAME
+
+#ifdef CC_USING_FENTRY
+	leaq SS+16(%rsp), %rdi
+	movq $0, %rdx	/* No framepointers needed */
+#else
+	leaq 8(%rbp), %rdi
+	movq (%rbp), %rdx
+#endif
+	movq RIP(%rsp), %rsi
+	subq $MCOUNT_INSN_SIZE, %rsi
+
+	call prepare_ftrace_return
+
+	MCOUNT_RESTORE_FRAME
+
+	retq
+END(ftrace_graph_caller)
+
+GLOBAL(return_to_handler)
+	subq $24, %rsp
+
+	/* Save the return values */
+	movq %rax, (%rsp)
+	movq %rdx, 8(%rsp)
+	movq %rbp, %rdi
+
+	call ftrace_return_to_handler
+
+	movq %rax, %rdi
+	movq 8(%rsp), %rdx
+	movq (%rsp), %rax
+	addq $24, %rsp
+	jmp *%rdi
+#endif
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 2018751cad9e..404a686a3644 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -62,9 +62,6 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
  * set in the flags member.
  *
  * ENABLED - set/unset when ftrace_ops is registered/unregistered
- * GLOBAL  - set manualy by ftrace_ops user to denote the ftrace_ops
- *           is part of the global tracers sharing the same filter
- *           via set_ftrace_* debugfs files.
  * DYNAMIC - set when ftrace_ops is registered to denote dynamically
  *           allocated ftrace_ops which need special care
  * CONTROL - set manualy by ftrace_ops user to denote the ftrace_ops
@@ -96,15 +93,14 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
  */
 enum {
 	FTRACE_OPS_FL_ENABLED			= 1 << 0,
-	FTRACE_OPS_FL_GLOBAL			= 1 << 1,
-	FTRACE_OPS_FL_DYNAMIC			= 1 << 2,
-	FTRACE_OPS_FL_CONTROL			= 1 << 3,
-	FTRACE_OPS_FL_SAVE_REGS			= 1 << 4,
-	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 5,
-	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 6,
-	FTRACE_OPS_FL_STUB			= 1 << 7,
-	FTRACE_OPS_FL_INITIALIZED		= 1 << 8,
-	FTRACE_OPS_FL_DELETED			= 1 << 9,
+	FTRACE_OPS_FL_DYNAMIC			= 1 << 1,
+	FTRACE_OPS_FL_CONTROL			= 1 << 2,
+	FTRACE_OPS_FL_SAVE_REGS			= 1 << 3,
+	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 4,
+	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 5,
+	FTRACE_OPS_FL_STUB			= 1 << 6,
+	FTRACE_OPS_FL_INITIALIZED		= 1 << 7,
+	FTRACE_OPS_FL_DELETED			= 1 << 8,
 };
 
 /*
@@ -366,14 +362,12 @@ enum {
  *  IGNORE           - The function is already what we want it to be
  *  MAKE_CALL        - Start tracing the function
  *  MODIFY_CALL      - Stop saving regs for the function
- *  MODIFY_CALL_REGS - Start saving regs for the function
  *  MAKE_NOP         - Stop tracing the function
  */
 enum {
 	FTRACE_UPDATE_IGNORE,
 	FTRACE_UPDATE_MAKE_CALL,
 	FTRACE_UPDATE_MODIFY_CALL,
-	FTRACE_UPDATE_MODIFY_CALL_REGS,
 	FTRACE_UPDATE_MAKE_NOP,
 };
 
@@ -404,6 +398,8 @@ int ftrace_update_record(struct dyn_ftrace *rec, int enable);
 int ftrace_test_record(struct dyn_ftrace *rec, int enable);
 void ftrace_run_stop_machine(int command);
 unsigned long ftrace_location(unsigned long ip);
+unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
+unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);
 
 extern ftrace_func_t ftrace_trace_function;
 
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index d16da3e53bc7..cff3106ffe2c 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -38,6 +38,9 @@ const char *ftrace_print_symbols_seq_u64(struct trace_seq *p,
 					 *symbol_array);
 #endif
 
+const char *ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
+				     unsigned int bitmask_size);
+
 const char *ftrace_print_hex_seq(struct trace_seq *p,
 				 const unsigned char *buf, int len);
 
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 925eaf28fca9..7bd2ad01e39c 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -355,7 +355,7 @@ static inline void reset_current_kprobe(void)
 
 static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
 {
-	return (&__get_cpu_var(kprobe_ctlblk));
+	return this_cpu_ptr(&kprobe_ctlblk);
 }
 
 int register_kprobe(struct kprobe *p);
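The one-line change above is part of the tree-wide move from __get_cpu_var() to the this_cpu_*() accessors. As a hedged sketch of the idiom (the demo function is illustrative; only kprobe_ctlblk comes from the patch):

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

    static struct kprobe_ctlblk *demo_get_ctlblk(void)
    {
            /*
             * Old style: form the per-cpu lvalue, then take its address:
             *         return &__get_cpu_var(kprobe_ctlblk);
             * New style: ask for this CPU's pointer directly:
             */
            return this_cpu_ptr(&kprobe_ctlblk);
    }

Both spellings are normally used with preemption disabled so the CPU cannot change under the caller; the new one simply avoids the address-of-lvalue detour.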
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
index a32d86ec8bf2..136116924d8d 100644
--- a/include/linux/trace_seq.h
+++ b/include/linux/trace_seq.h
@@ -46,6 +46,9 @@ extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
 extern void *trace_seq_reserve(struct trace_seq *s, size_t len);
 extern int trace_seq_path(struct trace_seq *s, const struct path *path);
 
+extern int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
+			     int nmaskbits);
+
 #else /* CONFIG_TRACING */
 static inline int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
 {
@@ -57,6 +60,13 @@ trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
 	return 0;
 }
 
+static inline int
+trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
+		  int nmaskbits)
+{
+	return 0;
+}
+
 static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s)
 {
 	return 0;
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 9d30ee469c2a..2e2a5f7717e5 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -185,6 +185,11 @@ extern void syscall_unregfunc(void);
 	static inline void						\
 	check_trace_callback_type_##name(void (*cb)(data_proto))	\
 	{								\
+	}								\
+	static inline bool						\
+	trace_##name##_enabled(void)					\
+	{								\
+		return static_key_false(&__tracepoint_##name.key);	\
 	}
 
 /*
@@ -230,6 +235,11 @@ extern void syscall_unregfunc(void);
 	}								\
 	static inline void check_trace_callback_type_##name(void (*cb)(data_proto)) \
 	{								\
+	}								\
+	static inline bool						\
+	trace_##name##_enabled(void)					\
+	{								\
+		return false;						\
 	}
 
 #define DEFINE_TRACE_FN(name, reg, unreg)
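Concretely, for a tracepoint named foo_bar the first hunk above generates an inline roughly equivalent to the following (a simplified sketch of the macro expansion, with the surrounding DECLARE_TRACE plumbing elided):

    static inline bool trace_foo_bar_enabled(void)
    {
            /*
             * static_key_false() compiles to a patchable no-op branch
             * (a jump label), so a disabled tracepoint costs no
             * conditional test on the hot path.
             */
            return static_key_false(&__tracepoint_foo_bar.key);
    }

The second hunk is the stub for builds without tracepoints: it simply returns false, so guarded blocks compile away entirely.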
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 0a1a4f7caf09..0fd06fef9fac 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -53,6 +53,9 @@
 #undef __string
 #define __string(item, src) __dynamic_array(char, item, -1)
 
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(char, item, -1)
+
 #undef TP_STRUCT__entry
 #define TP_STRUCT__entry(args...) args
 
@@ -128,6 +131,9 @@
 #undef __string
 #define __string(item, src) __dynamic_array(char, item, -1)
 
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
+
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
 	struct ftrace_data_offsets_##call {				\
@@ -197,9 +203,22 @@
 #define __get_dynamic_array(field)	\
 		((void *)__entry + (__entry->__data_loc_##field & 0xffff))
 
+#undef __get_dynamic_array_len
+#define __get_dynamic_array_len(field)	\
+		((__entry->__data_loc_##field >> 16) & 0xffff)
+
 #undef __get_str
 #define __get_str(field) (char *)__get_dynamic_array(field)
 
+#undef __get_bitmask
+#define __get_bitmask(field)						\
+	({								\
+		void *__bitmask = __get_dynamic_array(field);		\
+		unsigned int __bitmask_size;				\
+		__bitmask_size = __get_dynamic_array_len(field);	\
+		ftrace_print_bitmask_seq(p, __bitmask, __bitmask_size);	\
+	})
+
 #undef __print_flags
 #define __print_flags(flag, delim, flag_array...)			\
 	({								\
@@ -322,6 +341,9 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
 #undef __string
 #define __string(item, src) __dynamic_array(char, item, -1)
 
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
+
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
 static int notrace __init						\
@@ -372,6 +394,29 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
 #define __string(item, src) __dynamic_array(char, item,		\
 		    strlen((src) ? (const char *)(src) : "(null)") + 1)
 
+/*
+ * __bitmask_size_in_bytes_raw is the number of bytes needed to hold
+ * num_possible_cpus().
+ */
+#define __bitmask_size_in_bytes_raw(nr_bits)	\
+	(((nr_bits) + 7) / 8)
+
+#define __bitmask_size_in_longs(nr_bits)			\
+	((__bitmask_size_in_bytes_raw(nr_bits) +		\
+	  ((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8))
+
+/*
+ * __bitmask_size_in_bytes is the number of bytes needed to hold
+ * num_possible_cpus() padded out to the nearest long. This is what
+ * is saved in the buffer, just to be consistent.
+ */
+#define __bitmask_size_in_bytes(nr_bits)				\
+	(__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8))
+
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item,	\
+					 __bitmask_size_in_longs(nr_bits))
+
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
 static inline notrace int ftrace_get_offsets_##call(			\
@@ -513,12 +558,22 @@ static inline notrace int ftrace_get_offsets_##call(			\
 	__entry->__data_loc_##item = __data_offsets.item;
 
 #undef __string
-#define __string(item, src) __dynamic_array(char, item, -1)		\
+#define __string(item, src) __dynamic_array(char, item, -1)
 
 #undef __assign_str
 #define __assign_str(dst, src)						\
 	strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
 
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
+
+#undef __get_bitmask
+#define __get_bitmask(field) (char *)__get_dynamic_array(field)
+
+#undef __assign_bitmask
+#define __assign_bitmask(dst, src, nr_bits)					\
+	memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
+
 #undef TP_fast_assign
 #define TP_fast_assign(args...) args
 
@@ -585,7 +640,9 @@ static inline void ftrace_test_probe_##call(void)			\
 #undef __print_symbolic
 #undef __print_hex
 #undef __get_dynamic_array
+#undef __get_dynamic_array_len
 #undef __get_str
+#undef __get_bitmask
 
 #undef TP_printk
 #define TP_printk(fmt, args...) "\"" fmt "\", "  __stringify(args)
@@ -648,9 +705,16 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
 #define __get_dynamic_array(field)	\
 		((void *)__entry + (__entry->__data_loc_##field & 0xffff))
 
+#undef __get_dynamic_array_len
+#define __get_dynamic_array_len(field)	\
+		((__entry->__data_loc_##field >> 16) & 0xffff)
+
 #undef __get_str
 #define __get_str(field) (char *)__get_dynamic_array(field)
 
+#undef __get_bitmask
+#define __get_bitmask(field) (char *)__get_dynamic_array(field)
+
 #undef __perf_addr
 #define __perf_addr(a)	(__addr = (a))
 
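Putting the new pieces together, an event can now record a bitmask (typically a cpumask) as a dynamic array of longs. A minimal sketch of the intended usage (the foo_bar event and its arguments are illustrative; the __bitmask(), __assign_bitmask() and __get_bitmask() macros are the ones introduced above):

    TRACE_EVENT(foo_bar,

            TP_PROTO(const struct cpumask *mask),

            TP_ARGS(mask),

            TP_STRUCT__entry(
                    /* reserves __bitmask_size_in_longs() longs in the buffer */
                    __bitmask(cpus, num_possible_cpus())
            ),

            TP_fast_assign(
                    __assign_bitmask(cpus, cpumask_bits(mask),
                                     num_possible_cpus());
            ),

            /* __get_bitmask() formats the saved bits as "0-3,8"-style text */
            TP_printk("cpus=%s", __get_bitmask(cpus))
    );

The __get_dynamic_array_len() macro added in the same series is what lets __get_bitmask() recover the stored size from the packed __data_loc word (length in the upper 16 bits, offset in the lower 16).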
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 8639819f6cef..d4409356f40d 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -535,6 +535,36 @@ config MMIOTRACE_TEST
 
 	  Say N, unless you absolutely know what you are doing.
 
+config TRACEPOINT_BENCHMARK
+	bool "Add tracepoint that benchmarks tracepoints"
+	help
+	 This option creates the tracepoint "benchmark:benchmark_event".
+	 When the tracepoint is enabled, it kicks off a kernel thread that
+	 goes into an infinite loop (calling cond_sched() to let other tasks
+	 run), and calls the tracepoint. Each iteration will record the time
+	 it took to write to the tracepoint and the next iteration that
+	 data will be passed to the tracepoint itself. That is, the tracepoint
+	 will report the time it took to do the previous tracepoint.
+	 The string written to the tracepoint is a static string of 128 bytes
+	 to keep the time the same. The initial string is simply a write of
+	 "START". The second string records the cold cache time of the first
+	 write which is not added to the rest of the calculations.
+
+	 As it is a tight loop, it benchmarks as hot cache. That's fine because
+	 we care most about hot paths that are probably in cache already.
+
+	 An example of the output:
+
+	      START
+	      first=3672 [COLD CACHED]
+	      last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
+	      last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
+	      last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
+	      last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
+	      last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
+	      last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666
+
+
 config RING_BUFFER_BENCHMARK
 	tristate "Ring buffer benchmark stress tester"
 	depends on RING_BUFFER
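The help text describes a self-timing loop: each pass times its own tracepoint write and reports that number through the next write. A rough sketch of that mechanism (an illustration of the loop described above, not the actual kernel/trace/trace_benchmark.c; the tracepoint call name here is hypothetical):

    static int benchmark_event_kthread(void *arg)
    {
            char msg[128] = "START";  /* fixed-size string keeps the cost constant */
            u64 start, stop;

            while (!kthread_should_stop()) {
                    start = trace_clock_local();
                    trace_benchmark_event(msg);     /* reports the previous delta */
                    stop = trace_clock_local();

                    /* the time just measured becomes the next message */
                    snprintf(msg, sizeof(msg), "last=%llu", stop - start);

                    cond_resched();                 /* let other tasks run */
            }
            return 0;
    }

Because the loop is tight, the numbers reflect hot-cache cost, which is what matters for tracepoints on hot paths.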
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 1378e84fbe39..2611613f14f1 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -17,6 +17,7 @@ ifdef CONFIG_TRACING_BRANCHES
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
 endif
 
+CFLAGS_trace_benchmark.o := -I$(src)
 CFLAGS_trace_events_filter.o := -I$(src)
 
 obj-$(CONFIG_TRACE_CLOCK) += trace_clock.o
@@ -62,4 +63,6 @@ endif
 obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o
 obj-$(CONFIG_UPROBE_EVENT) += trace_uprobe.o
 
+obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o
+
 libftrace-y := ftrace.o
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 4a54a25afa2f..5b372e3ed675 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -62,7 +62,7 @@
62#define FTRACE_HASH_DEFAULT_BITS 10 62#define FTRACE_HASH_DEFAULT_BITS 10
63#define FTRACE_HASH_MAX_BITS 12 63#define FTRACE_HASH_MAX_BITS 12
64 64
65#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL) 65#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)
66 66
67#ifdef CONFIG_DYNAMIC_FTRACE 67#ifdef CONFIG_DYNAMIC_FTRACE
68#define INIT_REGEX_LOCK(opsname) \ 68#define INIT_REGEX_LOCK(opsname) \
@@ -103,7 +103,6 @@ static int ftrace_disabled __read_mostly;
103 103
104static DEFINE_MUTEX(ftrace_lock); 104static DEFINE_MUTEX(ftrace_lock);
105 105
106static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
107static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end; 106static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
108static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end; 107static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
109ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; 108ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
@@ -171,23 +170,6 @@ int ftrace_nr_registered_ops(void)
171 return cnt; 170 return cnt;
172} 171}
173 172
174static void
175ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
176 struct ftrace_ops *op, struct pt_regs *regs)
177{
178 int bit;
179
180 bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
181 if (bit < 0)
182 return;
183
184 do_for_each_ftrace_op(op, ftrace_global_list) {
185 op->func(ip, parent_ip, op, regs);
186 } while_for_each_ftrace_op(op);
187
188 trace_clear_recursion(bit);
189}
190
191static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip, 173static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
192 struct ftrace_ops *op, struct pt_regs *regs) 174 struct ftrace_ops *op, struct pt_regs *regs)
193{ 175{
@@ -237,43 +219,6 @@ static int control_ops_alloc(struct ftrace_ops *ops)
237 return 0; 219 return 0;
238} 220}
239 221
240static void update_global_ops(void)
241{
242 ftrace_func_t func = ftrace_global_list_func;
243 void *private = NULL;
244
245 /* The list has its own recursion protection. */
246 global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
247
248 /*
249 * If there's only one function registered, then call that
250 * function directly. Otherwise, we need to iterate over the
251 * registered callers.
252 */
253 if (ftrace_global_list == &ftrace_list_end ||
254 ftrace_global_list->next == &ftrace_list_end) {
255 func = ftrace_global_list->func;
256 private = ftrace_global_list->private;
257 /*
258 * As we are calling the function directly.
259 * If it does not have recursion protection,
260 * the function_trace_op needs to be updated
261 * accordingly.
262 */
263 if (!(ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE))
264 global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
265 }
266
267 /* If we filter on pids, update to use the pid function */
268 if (!list_empty(&ftrace_pids)) {
269 set_ftrace_pid_function(func);
270 func = ftrace_pid_func;
271 }
272
273 global_ops.func = func;
274 global_ops.private = private;
275}
276
277static void ftrace_sync(struct work_struct *work) 222static void ftrace_sync(struct work_struct *work)
278{ 223{
279 /* 224 /*
@@ -301,8 +246,6 @@ static void update_ftrace_function(void)
301{ 246{
302 ftrace_func_t func; 247 ftrace_func_t func;
303 248
304 update_global_ops();
305
306 /* 249 /*
307 * If we are at the end of the list and this ops is 250 * If we are at the end of the list and this ops is
308 * recursion safe and not dynamic and the arch supports passing ops, 251 * recursion safe and not dynamic and the arch supports passing ops,
@@ -314,10 +257,7 @@ static void update_ftrace_function(void)
314 (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) && 257 (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
315 !FTRACE_FORCE_LIST_FUNC)) { 258 !FTRACE_FORCE_LIST_FUNC)) {
316 /* Set the ftrace_ops that the arch callback uses */ 259 /* Set the ftrace_ops that the arch callback uses */
317 if (ftrace_ops_list == &global_ops) 260 set_function_trace_op = ftrace_ops_list;
318 set_function_trace_op = ftrace_global_list;
319 else
320 set_function_trace_op = ftrace_ops_list;
321 func = ftrace_ops_list->func; 261 func = ftrace_ops_list->func;
322 } else { 262 } else {
323 /* Just use the default ftrace_ops */ 263 /* Just use the default ftrace_ops */
@@ -373,6 +313,11 @@ static void update_ftrace_function(void)
373 ftrace_trace_function = func; 313 ftrace_trace_function = func;
374} 314}
375 315
316int using_ftrace_ops_list_func(void)
317{
318 return ftrace_trace_function == ftrace_ops_list_func;
319}
320
376static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops) 321static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
377{ 322{
378 ops->next = *list; 323 ops->next = *list;
@@ -434,16 +379,9 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
434 if (ops->flags & FTRACE_OPS_FL_DELETED) 379 if (ops->flags & FTRACE_OPS_FL_DELETED)
435 return -EINVAL; 380 return -EINVAL;
436 381
437 if (FTRACE_WARN_ON(ops == &global_ops))
438 return -EINVAL;
439
440 if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED)) 382 if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
441 return -EBUSY; 383 return -EBUSY;
442 384
443 /* We don't support both control and global flags set. */
444 if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
445 return -EINVAL;
446
447#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS 385#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
448 /* 386 /*
449 * If the ftrace_ops specifies SAVE_REGS, then it only can be used 387 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
@@ -461,10 +399,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
461 if (!core_kernel_data((unsigned long)ops)) 399 if (!core_kernel_data((unsigned long)ops))
462 ops->flags |= FTRACE_OPS_FL_DYNAMIC; 400 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
463 401
464 if (ops->flags & FTRACE_OPS_FL_GLOBAL) { 402 if (ops->flags & FTRACE_OPS_FL_CONTROL) {
465 add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
466 ops->flags |= FTRACE_OPS_FL_ENABLED;
467 } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
468 if (control_ops_alloc(ops)) 403 if (control_ops_alloc(ops))
469 return -ENOMEM; 404 return -ENOMEM;
470 add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops); 405 add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
@@ -484,15 +419,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
484 if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED))) 419 if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
485 return -EBUSY; 420 return -EBUSY;
486 421
487 if (FTRACE_WARN_ON(ops == &global_ops)) 422 if (ops->flags & FTRACE_OPS_FL_CONTROL) {
488 return -EINVAL;
489
490 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
491 ret = remove_ftrace_list_ops(&ftrace_global_list,
492 &global_ops, ops);
493 if (!ret)
494 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
495 } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
496 ret = remove_ftrace_list_ops(&ftrace_control_list, 423 ret = remove_ftrace_list_ops(&ftrace_control_list,
497 &control_ops, ops); 424 &control_ops, ops);
498 } else 425 } else
@@ -895,7 +822,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip,
895 822
896 local_irq_save(flags); 823 local_irq_save(flags);
897 824
898 stat = &__get_cpu_var(ftrace_profile_stats); 825 stat = this_cpu_ptr(&ftrace_profile_stats);
899 if (!stat->hash || !ftrace_profile_enabled) 826 if (!stat->hash || !ftrace_profile_enabled)
900 goto out; 827 goto out;
901 828
@@ -926,7 +853,7 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
926 unsigned long flags; 853 unsigned long flags;
927 854
928 local_irq_save(flags); 855 local_irq_save(flags);
929 stat = &__get_cpu_var(ftrace_profile_stats); 856 stat = this_cpu_ptr(&ftrace_profile_stats);
930 if (!stat->hash || !ftrace_profile_enabled) 857 if (!stat->hash || !ftrace_profile_enabled)
931 goto out; 858 goto out;
932 859
@@ -1178,7 +1105,7 @@ struct ftrace_page {
1178static struct ftrace_page *ftrace_pages_start; 1105static struct ftrace_page *ftrace_pages_start;
1179static struct ftrace_page *ftrace_pages; 1106static struct ftrace_page *ftrace_pages;
1180 1107
1181static bool ftrace_hash_empty(struct ftrace_hash *hash) 1108static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash)
1182{ 1109{
1183 return !hash || !hash->count; 1110 return !hash || !hash->count;
1184} 1111}
@@ -1625,7 +1552,14 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1625 in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip); 1552 in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1626 1553
1627 /* 1554 /*
1555 * If filter_hash is set, we want to match all functions
1556 * that are in the hash but not in the other hash.
1628 * 1557 *
1558 * If filter_hash is not set, then we are decrementing.
1559 * That means we match anything that is in the hash
1560 * and also in the other_hash. That is, we need to turn
1561 * off functions in the other hash because they are disabled
1562 * by this hash.
1629 */ 1563 */
1630 if (filter_hash && in_hash && !in_other_hash) 1564 if (filter_hash && in_hash && !in_other_hash)
1631 match = 1; 1565 match = 1;
@@ -1767,19 +1701,15 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
1767 /* 1701 /*
1768 * If this record is being updated from a nop, then 1702 * If this record is being updated from a nop, then
1769 * return UPDATE_MAKE_CALL. 1703 * return UPDATE_MAKE_CALL.
1770 * Otherwise, if the EN flag is set, then return
1771 * UPDATE_MODIFY_CALL_REGS to tell the caller to convert
1772 * from the non-save regs, to a save regs function.
1773 * Otherwise, 1704 * Otherwise,
1774 * return UPDATE_MODIFY_CALL to tell the caller to convert 1705 * return UPDATE_MODIFY_CALL to tell the caller to convert
1775 * from the save regs, to a non-save regs function. 1706 * from the save regs, to a non-save regs function or
1707 * vice versa.
1776 */ 1708 */
1777 if (flag & FTRACE_FL_ENABLED) 1709 if (flag & FTRACE_FL_ENABLED)
1778 return FTRACE_UPDATE_MAKE_CALL; 1710 return FTRACE_UPDATE_MAKE_CALL;
1779 else if (rec->flags & FTRACE_FL_REGS_EN) 1711
1780 return FTRACE_UPDATE_MODIFY_CALL_REGS; 1712 return FTRACE_UPDATE_MODIFY_CALL;
1781 else
1782 return FTRACE_UPDATE_MODIFY_CALL;
1783 } 1713 }
1784 1714
1785 if (update) { 1715 if (update) {
@@ -1821,6 +1751,42 @@ int ftrace_test_record(struct dyn_ftrace *rec, int enable)
1821 return ftrace_check_record(rec, enable, 0); 1751 return ftrace_check_record(rec, enable, 0);
1822} 1752}
1823 1753
1754/**
1755 * ftrace_get_addr_new - Get the call address to set to
1756 * @rec: The ftrace record descriptor
1757 *
1758 * If the record has the FTRACE_FL_REGS set, that means that it
1759 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
1760 * is not not set, then it wants to convert to the normal callback.
1761 *
1762 * Returns the address of the trampoline to set to
1763 */
1764unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
1765{
1766 if (rec->flags & FTRACE_FL_REGS)
1767 return (unsigned long)FTRACE_REGS_ADDR;
1768 else
1769 return (unsigned long)FTRACE_ADDR;
1770}
1771
1772/**
1773 * ftrace_get_addr_curr - Get the call address that is already there
1774 * @rec: The ftrace record descriptor
1775 *
1776 * The FTRACE_FL_REGS_EN is set when the record already points to
1777 * a function that saves all the regs. Basically the '_EN' version
1778 * represents the current state of the function.
1779 *
1780 * Returns the address of the trampoline that is currently being called
1781 */
1782unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
1783{
1784 if (rec->flags & FTRACE_FL_REGS_EN)
1785 return (unsigned long)FTRACE_REGS_ADDR;
1786 else
1787 return (unsigned long)FTRACE_ADDR;
1788}
1789
1824static int 1790static int
1825__ftrace_replace_code(struct dyn_ftrace *rec, int enable) 1791__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1826{ 1792{
@@ -1828,12 +1794,12 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1828 unsigned long ftrace_addr; 1794 unsigned long ftrace_addr;
1829 int ret; 1795 int ret;
1830 1796
1831 ret = ftrace_update_record(rec, enable); 1797 ftrace_addr = ftrace_get_addr_new(rec);
1832 1798
1833 if (rec->flags & FTRACE_FL_REGS) 1799 /* This needs to be done before we call ftrace_update_record */
1834 ftrace_addr = (unsigned long)FTRACE_REGS_ADDR; 1800 ftrace_old_addr = ftrace_get_addr_curr(rec);
1835 else 1801
1836 ftrace_addr = (unsigned long)FTRACE_ADDR; 1802 ret = ftrace_update_record(rec, enable);
1837 1803
1838 switch (ret) { 1804 switch (ret) {
1839 case FTRACE_UPDATE_IGNORE: 1805 case FTRACE_UPDATE_IGNORE:
@@ -1845,13 +1811,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1845 case FTRACE_UPDATE_MAKE_NOP: 1811 case FTRACE_UPDATE_MAKE_NOP:
1846 return ftrace_make_nop(NULL, rec, ftrace_addr); 1812 return ftrace_make_nop(NULL, rec, ftrace_addr);
1847 1813
1848 case FTRACE_UPDATE_MODIFY_CALL_REGS:
1849 case FTRACE_UPDATE_MODIFY_CALL: 1814 case FTRACE_UPDATE_MODIFY_CALL:
1850 if (rec->flags & FTRACE_FL_REGS)
1851 ftrace_old_addr = (unsigned long)FTRACE_ADDR;
1852 else
1853 ftrace_old_addr = (unsigned long)FTRACE_REGS_ADDR;
1854
1855 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr); 1815 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
1856 } 1816 }
1857 1817
@@ -2115,7 +2075,6 @@ static void ftrace_startup_enable(int command)
2115 2075
2116static int ftrace_startup(struct ftrace_ops *ops, int command) 2076static int ftrace_startup(struct ftrace_ops *ops, int command)
2117{ 2077{
2118 bool hash_enable = true;
2119 int ret; 2078 int ret;
2120 2079
2121 if (unlikely(ftrace_disabled)) 2080 if (unlikely(ftrace_disabled))
@@ -2128,18 +2087,9 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
2128 ftrace_start_up++; 2087 ftrace_start_up++;
2129 command |= FTRACE_UPDATE_CALLS; 2088 command |= FTRACE_UPDATE_CALLS;
2130 2089
2131 /* ops marked global share the filter hashes */
2132 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2133 ops = &global_ops;
2134 /* Don't update hash if global is already set */
2135 if (global_start_up)
2136 hash_enable = false;
2137 global_start_up++;
2138 }
2139
2140 ops->flags |= FTRACE_OPS_FL_ENABLED; 2090 ops->flags |= FTRACE_OPS_FL_ENABLED;
2141 if (hash_enable) 2091
2142 ftrace_hash_rec_enable(ops, 1); 2092 ftrace_hash_rec_enable(ops, 1);
2143 2093
2144 ftrace_startup_enable(command); 2094 ftrace_startup_enable(command);
2145 2095
@@ -2148,7 +2098,6 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
2148 2098
2149static int ftrace_shutdown(struct ftrace_ops *ops, int command) 2099static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2150{ 2100{
2151 bool hash_disable = true;
2152 int ret; 2101 int ret;
2153 2102
2154 if (unlikely(ftrace_disabled)) 2103 if (unlikely(ftrace_disabled))
@@ -2166,21 +2115,9 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2166 */ 2115 */
2167 WARN_ON_ONCE(ftrace_start_up < 0); 2116 WARN_ON_ONCE(ftrace_start_up < 0);
2168 2117
2169 if (ops->flags & FTRACE_OPS_FL_GLOBAL) { 2118 ftrace_hash_rec_disable(ops, 1);
2170 ops = &global_ops;
2171 global_start_up--;
2172 WARN_ON_ONCE(global_start_up < 0);
2173 /* Don't update hash if global still has users */
2174 if (global_start_up) {
2175 WARN_ON_ONCE(!ftrace_start_up);
2176 hash_disable = false;
2177 }
2178 }
2179
2180 if (hash_disable)
2181 ftrace_hash_rec_disable(ops, 1);
2182 2119
2183 if (ops != &global_ops || !global_start_up) 2120 if (!global_start_up)
2184 ops->flags &= ~FTRACE_OPS_FL_ENABLED; 2121 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2185 2122
2186 command |= FTRACE_UPDATE_CALLS; 2123 command |= FTRACE_UPDATE_CALLS;
@@ -3524,10 +3461,6 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3524 struct ftrace_hash *hash; 3461 struct ftrace_hash *hash;
3525 int ret; 3462 int ret;
3526 3463
3527 /* All global ops uses the global ops filters */
3528 if (ops->flags & FTRACE_OPS_FL_GLOBAL)
3529 ops = &global_ops;
3530
3531 if (unlikely(ftrace_disabled)) 3464 if (unlikely(ftrace_disabled))
3532 return -ENODEV; 3465 return -ENODEV;
3533 3466
@@ -3639,8 +3572,7 @@ int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
3639} 3572}
3640EXPORT_SYMBOL_GPL(ftrace_set_notrace); 3573EXPORT_SYMBOL_GPL(ftrace_set_notrace);
3641/** 3574/**
3642 * ftrace_set_filter - set a function to filter on in ftrace 3575 * ftrace_set_global_filter - set a function to filter on with global tracers
3643 * @ops - the ops to set the filter with
3644 * @buf - the string that holds the function filter text. 3576 * @buf - the string that holds the function filter text.
3645 * @len - the length of the string. 3577 * @len - the length of the string.
3646 * @reset - non zero to reset all filters before applying this filter. 3578 * @reset - non zero to reset all filters before applying this filter.
@@ -3655,8 +3587,7 @@ void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
3655EXPORT_SYMBOL_GPL(ftrace_set_global_filter); 3587EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3656 3588
3657/** 3589/**
3658 * ftrace_set_notrace - set a function to not trace in ftrace 3590 * ftrace_set_global_notrace - set a function to not trace with global tracers
3659 * @ops - the ops to set the notrace filter with
3660 * @buf - the string that holds the function notrace text. 3591 * @buf - the string that holds the function notrace text.
3661 * @len - the length of the string. 3592 * @len - the length of the string.
3662 * @reset - non zero to reset all filters before applying this filter. 3593 * @reset - non zero to reset all filters before applying this filter.
@@ -4443,6 +4374,34 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
4443 4374
4444#endif /* CONFIG_DYNAMIC_FTRACE */ 4375#endif /* CONFIG_DYNAMIC_FTRACE */
4445 4376
4377__init void ftrace_init_global_array_ops(struct trace_array *tr)
4378{
4379 tr->ops = &global_ops;
4380 tr->ops->private = tr;
4381}
4382
4383void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
4384{
4385 /* If we filter on pids, update to use the pid function */
4386 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4387 if (WARN_ON(tr->ops->func != ftrace_stub))
4388 printk("ftrace ops had %pS for function\n",
4389 tr->ops->func);
4390 /* Only the top level instance does pid tracing */
4391 if (!list_empty(&ftrace_pids)) {
4392 set_ftrace_pid_function(func);
4393 func = ftrace_pid_func;
4394 }
4395 }
4396 tr->ops->func = func;
4397 tr->ops->private = tr;
4398}
4399
4400void ftrace_reset_array_ops(struct trace_array *tr)
4401{
4402 tr->ops->func = ftrace_stub;
4403}
4404
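The three helpers above carry the lifecycle of a trace_array's ftrace_ops: the top-level array is bound to global_ops at boot, an instance gets its callback installed with ftrace_init_array_ops(), and ftrace_reset_array_ops() points the ops back at ftrace_stub on teardown. A minimal sketch of the intended pairing in a tracer's init/reset callbacks (my_tracer_init, my_tracer_reset, and my_trace_callback are hypothetical; the trace_functions.c hunk later in this diff is the real user):

	/* Hedged sketch, not part of this diff: pairing the new helpers. */
	static int my_tracer_init(struct trace_array *tr)
	{
		/* Instance ops are allocated at instance creation. */
		if (!tr->ops)
			return -ENOMEM;
		ftrace_init_array_ops(tr, my_trace_callback);
		return 0;
	}

	static void my_tracer_reset(struct trace_array *tr)
	{
		/* Leave the ops pointing back at ftrace_stub. */
		ftrace_reset_array_ops(tr);
	}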
4446static void 4405static void
4447ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip, 4406ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
4448 struct ftrace_ops *op, struct pt_regs *regs) 4407 struct ftrace_ops *op, struct pt_regs *regs)
@@ -4501,9 +4460,16 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4501 */ 4460 */
4502 preempt_disable_notrace(); 4461 preempt_disable_notrace();
4503 do_for_each_ftrace_op(op, ftrace_ops_list) { 4462 do_for_each_ftrace_op(op, ftrace_ops_list) {
4504 if (ftrace_ops_test(op, ip, regs)) 4463 if (ftrace_ops_test(op, ip, regs)) {
4464 if (WARN_ON(!op->func)) {
4465 function_trace_stop = 1;
4466 printk("op=%p %pS\n", op, op);
4467 goto out;
4468 }
4505 op->func(ip, parent_ip, op, regs); 4469 op->func(ip, parent_ip, op, regs);
4470 }
4506 } while_for_each_ftrace_op(op); 4471 } while_for_each_ftrace_op(op);
4472out:
4507 preempt_enable_notrace(); 4473 preempt_enable_notrace();
4508 trace_clear_recursion(bit); 4474 trace_clear_recursion(bit);
4509} 4475}
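The guard added to the list walker is defensive: calling a NULL ->func from the function-tracing hot path would be an unrecoverable crash, so the walker instead raises function_trace_stop, prints the offending ops pointer, and jumps out of the loop.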
@@ -4908,7 +4874,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
4908#ifdef CONFIG_FUNCTION_GRAPH_TRACER 4874#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4909 4875
4910static int ftrace_graph_active; 4876static int ftrace_graph_active;
4911static struct notifier_block ftrace_suspend_notifier;
4912 4877
4913int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) 4878int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
4914{ 4879{
@@ -5054,13 +5019,6 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
5054 return NOTIFY_DONE; 5019 return NOTIFY_DONE;
5055} 5020}
5056 5021
5057/* Just a place holder for function graph */
5058static struct ftrace_ops fgraph_ops __read_mostly = {
5059 .func = ftrace_stub,
5060 .flags = FTRACE_OPS_FL_STUB | FTRACE_OPS_FL_GLOBAL |
5061 FTRACE_OPS_FL_RECURSION_SAFE,
5062};
5063
5064static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace) 5022static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
5065{ 5023{
5066 if (!ftrace_ops_test(&global_ops, trace->func, NULL)) 5024 if (!ftrace_ops_test(&global_ops, trace->func, NULL))
@@ -5085,6 +5043,10 @@ static void update_function_graph_func(void)
5085 ftrace_graph_entry = ftrace_graph_entry_test; 5043 ftrace_graph_entry = ftrace_graph_entry_test;
5086} 5044}
5087 5045
5046static struct notifier_block ftrace_suspend_notifier = {
5047 .notifier_call = ftrace_suspend_notifier_call,
5048};
5049
5088int register_ftrace_graph(trace_func_graph_ret_t retfunc, 5050int register_ftrace_graph(trace_func_graph_ret_t retfunc,
5089 trace_func_graph_ent_t entryfunc) 5051 trace_func_graph_ent_t entryfunc)
5090{ 5052{
@@ -5098,7 +5060,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
5098 goto out; 5060 goto out;
5099 } 5061 }
5100 5062
5101 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
5102 register_pm_notifier(&ftrace_suspend_notifier); 5063 register_pm_notifier(&ftrace_suspend_notifier);
5103 5064
5104 ftrace_graph_active++; 5065 ftrace_graph_active++;
@@ -5120,7 +5081,10 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
5120 ftrace_graph_entry = ftrace_graph_entry_test; 5081 ftrace_graph_entry = ftrace_graph_entry_test;
5121 update_function_graph_func(); 5082 update_function_graph_func();
5122 5083
5123 ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET); 5084 /* Function graph doesn't use the .func field of global_ops */
5085 global_ops.flags |= FTRACE_OPS_FL_STUB;
5086
5087 ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
5124 5088
5125out: 5089out:
5126 mutex_unlock(&ftrace_lock); 5090 mutex_unlock(&ftrace_lock);
@@ -5138,7 +5102,8 @@ void unregister_ftrace_graph(void)
5138 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; 5102 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
5139 ftrace_graph_entry = ftrace_graph_entry_stub; 5103 ftrace_graph_entry = ftrace_graph_entry_stub;
5140 __ftrace_graph_entry = ftrace_graph_entry_stub; 5104 __ftrace_graph_entry = ftrace_graph_entry_stub;
5141 ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET); 5105 ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
5106 global_ops.flags &= ~FTRACE_OPS_FL_STUB;
5142 unregister_pm_notifier(&ftrace_suspend_notifier); 5107 unregister_pm_notifier(&ftrace_suspend_notifier);
5143 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); 5108 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
5144 5109
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 737b0efa1a62..16f7038d1f4d 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -275,7 +275,7 @@ int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
275} 275}
276EXPORT_SYMBOL_GPL(call_filter_check_discard); 276EXPORT_SYMBOL_GPL(call_filter_check_discard);
277 277
278cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu) 278static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
279{ 279{
280 u64 ts; 280 u64 ts;
281 281
@@ -599,7 +599,7 @@ static int alloc_snapshot(struct trace_array *tr)
599 return 0; 599 return 0;
600} 600}
601 601
602void free_snapshot(struct trace_array *tr) 602static void free_snapshot(struct trace_array *tr)
603{ 603{
604 /* 604 /*
605 * We don't free the ring buffer. Instead, we resize it because 605
@@ -963,27 +963,9 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
963 return cnt; 963 return cnt;
964} 964}
965 965
966/*
967 * ftrace_max_lock is used to protect the swapping of buffers
968 * when taking a max snapshot. The buffers themselves are
969 * protected by per_cpu spinlocks. But the action of the swap
970 * needs its own lock.
971 *
972 * This is defined as a arch_spinlock_t in order to help
973 * with performance when lockdep debugging is enabled.
974 *
975 * It is also used in other places outside the update_max_tr
976 * so it needs to be defined outside of the
977 * CONFIG_TRACER_MAX_TRACE.
978 */
979static arch_spinlock_t ftrace_max_lock =
980 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
981
982unsigned long __read_mostly tracing_thresh; 966unsigned long __read_mostly tracing_thresh;
983 967
984#ifdef CONFIG_TRACER_MAX_TRACE 968#ifdef CONFIG_TRACER_MAX_TRACE
985unsigned long __read_mostly tracing_max_latency;
986
987/* 969/*
988 * Copy the new maximum trace into the separate maximum-trace 970 * Copy the new maximum trace into the separate maximum-trace
989 * structure. (this way the maximum trace is permanently saved, 971 * structure. (this way the maximum trace is permanently saved,
@@ -1000,7 +982,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1000 max_buf->cpu = cpu; 982 max_buf->cpu = cpu;
1001 max_buf->time_start = data->preempt_timestamp; 983 max_buf->time_start = data->preempt_timestamp;
1002 984
1003 max_data->saved_latency = tracing_max_latency; 985 max_data->saved_latency = tr->max_latency;
1004 max_data->critical_start = data->critical_start; 986 max_data->critical_start = data->critical_start;
1005 max_data->critical_end = data->critical_end; 987 max_data->critical_end = data->critical_end;
1006 988
@@ -1048,14 +1030,14 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1048 return; 1030 return;
1049 } 1031 }
1050 1032
1051 arch_spin_lock(&ftrace_max_lock); 1033 arch_spin_lock(&tr->max_lock);
1052 1034
1053 buf = tr->trace_buffer.buffer; 1035 buf = tr->trace_buffer.buffer;
1054 tr->trace_buffer.buffer = tr->max_buffer.buffer; 1036 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1055 tr->max_buffer.buffer = buf; 1037 tr->max_buffer.buffer = buf;
1056 1038
1057 __update_max_tr(tr, tsk, cpu); 1039 __update_max_tr(tr, tsk, cpu);
1058 arch_spin_unlock(&ftrace_max_lock); 1040 arch_spin_unlock(&tr->max_lock);
1059} 1041}
1060 1042
1061/** 1043/**
@@ -1081,7 +1063,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1081 return; 1063 return;
1082 } 1064 }
1083 1065
1084 arch_spin_lock(&ftrace_max_lock); 1066 arch_spin_lock(&tr->max_lock);
1085 1067
1086 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu); 1068 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1087 1069
@@ -1099,11 +1081,11 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1099 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); 1081 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1100 1082
1101 __update_max_tr(tr, tsk, cpu); 1083 __update_max_tr(tr, tsk, cpu);
1102 arch_spin_unlock(&ftrace_max_lock); 1084 arch_spin_unlock(&tr->max_lock);
1103} 1085}
1104#endif /* CONFIG_TRACER_MAX_TRACE */ 1086#endif /* CONFIG_TRACER_MAX_TRACE */
1105 1087
1106static void default_wait_pipe(struct trace_iterator *iter) 1088static void wait_on_pipe(struct trace_iterator *iter)
1107{ 1089{
1108 /* Iterators are static, they should be filled or empty */ 1090 /* Iterators are static, they should be filled or empty */
1109 if (trace_buffer_iter(iter, iter->cpu_file)) 1091 if (trace_buffer_iter(iter, iter->cpu_file))
@@ -1220,8 +1202,6 @@ int register_tracer(struct tracer *type)
1220 else 1202 else
1221 if (!type->flags->opts) 1203 if (!type->flags->opts)
1222 type->flags->opts = dummy_tracer_opt; 1204 type->flags->opts = dummy_tracer_opt;
1223 if (!type->wait_pipe)
1224 type->wait_pipe = default_wait_pipe;
1225 1205
1226 ret = run_tracer_selftest(type); 1206 ret = run_tracer_selftest(type);
1227 if (ret < 0) 1207 if (ret < 0)
@@ -1305,22 +1285,71 @@ void tracing_reset_all_online_cpus(void)
1305 } 1285 }
1306} 1286}
1307 1287
1308#define SAVED_CMDLINES 128 1288#define SAVED_CMDLINES_DEFAULT 128
1309#define NO_CMDLINE_MAP UINT_MAX 1289#define NO_CMDLINE_MAP UINT_MAX
1310static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1311static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
1312static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
1313static int cmdline_idx;
1314static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED; 1290static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1291struct saved_cmdlines_buffer {
1292 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1293 unsigned *map_cmdline_to_pid;
1294 unsigned cmdline_num;
1295 int cmdline_idx;
1296 char *saved_cmdlines;
1297};
1298static struct saved_cmdlines_buffer *savedcmd;
1315 1299
1316/* temporary disable recording */ 1300/* temporary disable recording */
1317static atomic_t trace_record_cmdline_disabled __read_mostly; 1301static atomic_t trace_record_cmdline_disabled __read_mostly;
1318 1302
1319static void trace_init_cmdlines(void) 1303static inline char *get_saved_cmdlines(int idx)
1320{ 1304{
1321 memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline)); 1305 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1322 memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid)); 1306}
1323 cmdline_idx = 0; 1307
1308static inline void set_cmdline(int idx, const char *cmdline)
1309{
1310 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1311}
1312
1313static int allocate_cmdlines_buffer(unsigned int val,
1314 struct saved_cmdlines_buffer *s)
1315{
1316 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1317 GFP_KERNEL);
1318 if (!s->map_cmdline_to_pid)
1319 return -ENOMEM;
1320
1321 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1322 if (!s->saved_cmdlines) {
1323 kfree(s->map_cmdline_to_pid);
1324 return -ENOMEM;
1325 }
1326
1327 s->cmdline_idx = 0;
1328 s->cmdline_num = val;
1329 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1330 sizeof(s->map_pid_to_cmdline));
1331 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1332 val * sizeof(*s->map_cmdline_to_pid));
1333
1334 return 0;
1335}
1336
1337static int trace_create_savedcmd(void)
1338{
1339 int ret;
1340
1341 savedcmd = kmalloc(sizeof(struct saved_cmdlines_buffer), GFP_KERNEL);
1342 if (!savedcmd)
1343 return -ENOMEM;
1344
1345 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1346 if (ret < 0) {
1347 kfree(savedcmd);
1348 savedcmd = NULL;
1349 return -ENOMEM;
1350 }
1351
1352 return 0;
1324} 1353}
1325 1354
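Rough footprint of the new cache, assuming 4-byte unsigned, TASK_COMM_LEN of 16, and the usual PID_MAX_DEFAULT of 32768: the fixed pid-to-index map costs (32768 + 1) * 4 bytes, roughly 128 KB, while the resizable part costs val * (16 + 4) bytes, i.e. 2.5 KB at the SAVED_CMDLINES_DEFAULT of 128, growing linearly as the cache is resized.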
1326int is_tracing_stopped(void) 1355int is_tracing_stopped(void)
@@ -1353,7 +1382,7 @@ void tracing_start(void)
1353 } 1382 }
1354 1383
1355 /* Prevent the buffers from switching */ 1384 /* Prevent the buffers from switching */
1356 arch_spin_lock(&ftrace_max_lock); 1385 arch_spin_lock(&global_trace.max_lock);
1357 1386
1358 buffer = global_trace.trace_buffer.buffer; 1387 buffer = global_trace.trace_buffer.buffer;
1359 if (buffer) 1388 if (buffer)
@@ -1365,7 +1394,7 @@ void tracing_start(void)
1365 ring_buffer_record_enable(buffer); 1394 ring_buffer_record_enable(buffer);
1366#endif 1395#endif
1367 1396
1368 arch_spin_unlock(&ftrace_max_lock); 1397 arch_spin_unlock(&global_trace.max_lock);
1369 1398
1370 ftrace_start(); 1399 ftrace_start();
1371 out: 1400 out:
@@ -1420,7 +1449,7 @@ void tracing_stop(void)
1420 goto out; 1449 goto out;
1421 1450
1422 /* Prevent the buffers from switching */ 1451 /* Prevent the buffers from switching */
1423 arch_spin_lock(&ftrace_max_lock); 1452 arch_spin_lock(&global_trace.max_lock);
1424 1453
1425 buffer = global_trace.trace_buffer.buffer; 1454 buffer = global_trace.trace_buffer.buffer;
1426 if (buffer) 1455 if (buffer)
@@ -1432,7 +1461,7 @@ void tracing_stop(void)
1432 ring_buffer_record_disable(buffer); 1461 ring_buffer_record_disable(buffer);
1433#endif 1462#endif
1434 1463
1435 arch_spin_unlock(&ftrace_max_lock); 1464 arch_spin_unlock(&global_trace.max_lock);
1436 1465
1437 out: 1466 out:
1438 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags); 1467 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
@@ -1461,12 +1490,12 @@ static void tracing_stop_tr(struct trace_array *tr)
1461 1490
1462void trace_stop_cmdline_recording(void); 1491void trace_stop_cmdline_recording(void);
1463 1492
1464static void trace_save_cmdline(struct task_struct *tsk) 1493static int trace_save_cmdline(struct task_struct *tsk)
1465{ 1494{
1466 unsigned pid, idx; 1495 unsigned pid, idx;
1467 1496
1468 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT)) 1497 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1469 return; 1498 return 0;
1470 1499
1471 /* 1500 /*
1472 * It's not the end of the world if we don't get 1501 * It's not the end of the world if we don't get
@@ -1475,11 +1504,11 @@ static void trace_save_cmdline(struct task_struct *tsk)
1475 * so if we miss here, then better luck next time. 1504 * so if we miss here, then better luck next time.
1476 */ 1505 */
1477 if (!arch_spin_trylock(&trace_cmdline_lock)) 1506 if (!arch_spin_trylock(&trace_cmdline_lock))
1478 return; 1507 return 0;
1479 1508
1480 idx = map_pid_to_cmdline[tsk->pid]; 1509 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
1481 if (idx == NO_CMDLINE_MAP) { 1510 if (idx == NO_CMDLINE_MAP) {
1482 idx = (cmdline_idx + 1) % SAVED_CMDLINES; 1511 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
1483 1512
1484 /* 1513 /*
1485 * Check whether the cmdline buffer at idx has a pid 1514 * Check whether the cmdline buffer at idx has a pid
@@ -1487,22 +1516,24 @@ static void trace_save_cmdline(struct task_struct *tsk)
1487 * need to clear the map_pid_to_cmdline. Otherwise we 1516 * need to clear the map_pid_to_cmdline. Otherwise we
1488 * would read the new comm for the old pid. 1517 * would read the new comm for the old pid.
1489 */ 1518 */
1490 pid = map_cmdline_to_pid[idx]; 1519 pid = savedcmd->map_cmdline_to_pid[idx];
1491 if (pid != NO_CMDLINE_MAP) 1520 if (pid != NO_CMDLINE_MAP)
1492 map_pid_to_cmdline[pid] = NO_CMDLINE_MAP; 1521 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1493 1522
1494 map_cmdline_to_pid[idx] = tsk->pid; 1523 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1495 map_pid_to_cmdline[tsk->pid] = idx; 1524 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
1496 1525
1497 cmdline_idx = idx; 1526 savedcmd->cmdline_idx = idx;
1498 } 1527 }
1499 1528
1500 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN); 1529 set_cmdline(idx, tsk->comm);
1501 1530
1502 arch_spin_unlock(&trace_cmdline_lock); 1531 arch_spin_unlock(&trace_cmdline_lock);
1532
1533 return 1;
1503} 1534}
1504 1535
1505void trace_find_cmdline(int pid, char comm[]) 1536static void __trace_find_cmdline(int pid, char comm[])
1506{ 1537{
1507 unsigned map; 1538 unsigned map;
1508 1539
@@ -1521,13 +1552,19 @@ void trace_find_cmdline(int pid, char comm[])
1521 return; 1552 return;
1522 } 1553 }
1523 1554
1524 preempt_disable(); 1555 map = savedcmd->map_pid_to_cmdline[pid];
1525 arch_spin_lock(&trace_cmdline_lock);
1526 map = map_pid_to_cmdline[pid];
1527 if (map != NO_CMDLINE_MAP) 1556 if (map != NO_CMDLINE_MAP)
1528 strcpy(comm, saved_cmdlines[map]); 1557 strcpy(comm, get_saved_cmdlines(map));
1529 else 1558 else
1530 strcpy(comm, "<...>"); 1559 strcpy(comm, "<...>");
1560}
1561
1562void trace_find_cmdline(int pid, char comm[])
1563{
1564 preempt_disable();
1565 arch_spin_lock(&trace_cmdline_lock);
1566
1567 __trace_find_cmdline(pid, comm);
1531 1568
1532 arch_spin_unlock(&trace_cmdline_lock); 1569 arch_spin_unlock(&trace_cmdline_lock);
1533 preempt_enable(); 1570 preempt_enable();
@@ -1541,9 +1578,8 @@ void tracing_record_cmdline(struct task_struct *tsk)
1541 if (!__this_cpu_read(trace_cmdline_save)) 1578 if (!__this_cpu_read(trace_cmdline_save))
1542 return; 1579 return;
1543 1580
1544 __this_cpu_write(trace_cmdline_save, false); 1581 if (trace_save_cmdline(tsk))
1545 1582 __this_cpu_write(trace_cmdline_save, false);
1546 trace_save_cmdline(tsk);
1547} 1583}
1548 1584
1549void 1585void
@@ -1746,7 +1782,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
1746 */ 1782 */
1747 barrier(); 1783 barrier();
1748 if (use_stack == 1) { 1784 if (use_stack == 1) {
1749 trace.entries = &__get_cpu_var(ftrace_stack).calls[0]; 1785 trace.entries = this_cpu_ptr(ftrace_stack.calls);
1750 trace.max_entries = FTRACE_STACK_MAX_ENTRIES; 1786 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1751 1787
1752 if (regs) 1788 if (regs)
@@ -1995,7 +2031,21 @@ void trace_printk_init_buffers(void)
1995 if (alloc_percpu_trace_buffer()) 2031 if (alloc_percpu_trace_buffer())
1996 return; 2032 return;
1997 2033
1998 pr_info("ftrace: Allocated trace_printk buffers\n"); 2034 /* trace_printk() is for debug use only. Don't use it in production. */
2035
2036 pr_warning("\n**********************************************************\n");
2037 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2038 pr_warning("** **\n");
2039 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2040 pr_warning("** **\n");
2041 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
2042 pr_warning("** unsafe for production use. **\n");
2043 pr_warning("** **\n");
2044 pr_warning("** If you see this message and you are not debugging **\n");
2045 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2046 pr_warning("** **\n");
2047 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2048 pr_warning("**********************************************************\n");
1999 2049
2000 /* Expand the buffers to set size */ 2050 /* Expand the buffers to set size */
2001 tracing_update_buffers(); 2051 tracing_update_buffers();
@@ -3333,7 +3383,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3333 mutex_lock(&tracing_cpumask_update_lock); 3383 mutex_lock(&tracing_cpumask_update_lock);
3334 3384
3335 local_irq_disable(); 3385 local_irq_disable();
3336 arch_spin_lock(&ftrace_max_lock); 3386 arch_spin_lock(&tr->max_lock);
3337 for_each_tracing_cpu(cpu) { 3387 for_each_tracing_cpu(cpu) {
3338 /* 3388 /*
3339 * Increase/decrease the disabled counter if we are 3389 * Increase/decrease the disabled counter if we are
@@ -3350,7 +3400,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3350 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu); 3400 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3351 } 3401 }
3352 } 3402 }
3353 arch_spin_unlock(&ftrace_max_lock); 3403 arch_spin_unlock(&tr->max_lock);
3354 local_irq_enable(); 3404 local_irq_enable();
3355 3405
3356 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); 3406 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
@@ -3592,6 +3642,7 @@ static const char readme_msg[] =
3592 " trace_options\t\t- Set format or modify how tracing happens\n" 3642 " trace_options\t\t- Set format or modify how tracing happens\n"
3593 "\t\t\t Disable an option by adding a suffix 'no' to the\n" 3643 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3594 "\t\t\t option name\n" 3644 "\t\t\t option name\n"
3645 " saved_cmdlines_size\t- echo the number of comm-pid pairs to cache in here\n"
3595#ifdef CONFIG_DYNAMIC_FTRACE 3646#ifdef CONFIG_DYNAMIC_FTRACE
3596 "\n available_filter_functions - list of functions that can be filtered on\n" 3647 "\n available_filter_functions - list of functions that can be filtered on\n"
3597 " set_ftrace_filter\t- echo function name in here to only trace these\n" 3648 " set_ftrace_filter\t- echo function name in here to only trace these\n"
@@ -3705,55 +3756,153 @@ static const struct file_operations tracing_readme_fops = {
3705 .llseek = generic_file_llseek, 3756 .llseek = generic_file_llseek,
3706}; 3757};
3707 3758
3759static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3760{
3761 unsigned int *ptr = v;
3762
3763 if (*pos || m->count)
3764 ptr++;
3765
3766 (*pos)++;
3767
3768 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3769 ptr++) {
3770 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3771 continue;
3772
3773 return ptr;
3774 }
3775
3776 return NULL;
3777}
3778
3779static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3780{
3781 void *v;
3782 loff_t l = 0;
3783
3784 preempt_disable();
3785 arch_spin_lock(&trace_cmdline_lock);
3786
3787 v = &savedcmd->map_cmdline_to_pid[0];
3788 while (l <= *pos) {
3789 v = saved_cmdlines_next(m, v, &l);
3790 if (!v)
3791 return NULL;
3792 }
3793
3794 return v;
3795}
3796
3797static void saved_cmdlines_stop(struct seq_file *m, void *v)
3798{
3799 arch_spin_unlock(&trace_cmdline_lock);
3800 preempt_enable();
3801}
3802
3803static int saved_cmdlines_show(struct seq_file *m, void *v)
3804{
3805 char buf[TASK_COMM_LEN];
3806 unsigned int *pid = v;
3807
3808 __trace_find_cmdline(*pid, buf);
3809 seq_printf(m, "%d %s\n", *pid, buf);
3810 return 0;
3811}
3812
3813static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3814 .start = saved_cmdlines_start,
3815 .next = saved_cmdlines_next,
3816 .stop = saved_cmdlines_stop,
3817 .show = saved_cmdlines_show,
3818};
3819
3820static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3821{
3822 if (tracing_disabled)
3823 return -ENODEV;
3824
3825 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
3826}
3827
3828static const struct file_operations tracing_saved_cmdlines_fops = {
3829 .open = tracing_saved_cmdlines_open,
3830 .read = seq_read,
3831 .llseek = seq_lseek,
3832 .release = seq_release,
3833};
3834
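With the seq_file conversion, reading saved_cmdlines now streams one "pid comm" pair per line straight out of saved_cmdlines_show() (for example "1234 bash"), instead of snapshotting the whole table into a temporary buffer the way the read handler removed just below did.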
3708static ssize_t 3835static ssize_t
3709tracing_saved_cmdlines_read(struct file *file, char __user *ubuf, 3836tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3710 size_t cnt, loff_t *ppos) 3837 size_t cnt, loff_t *ppos)
3711{ 3838{
3712 char *buf_comm; 3839 char buf[64];
3713 char *file_buf; 3840 int r;
3714 char *buf; 3841
3715 int len = 0; 3842 arch_spin_lock(&trace_cmdline_lock);
3716 int pid; 3843 r = sprintf(buf, "%u\n", savedcmd->cmdline_num);
3717 int i; 3844 arch_spin_unlock(&trace_cmdline_lock);
3845
3846 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3847}
3848
3849static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3850{
3851 kfree(s->saved_cmdlines);
3852 kfree(s->map_cmdline_to_pid);
3853 kfree(s);
3854}
3855
3856static int tracing_resize_saved_cmdlines(unsigned int val)
3857{
3858 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3718 3859
3719 file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL); 3860 s = kmalloc(sizeof(struct saved_cmdlines_buffer), GFP_KERNEL);
3720 if (!file_buf) 3861 if (!s)
3721 return -ENOMEM; 3862 return -ENOMEM;
3722 3863
3723 buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL); 3864 if (allocate_cmdlines_buffer(val, s) < 0) {
3724 if (!buf_comm) { 3865 kfree(s);
3725 kfree(file_buf);
3726 return -ENOMEM; 3866 return -ENOMEM;
3727 } 3867 }
3728 3868
3729 buf = file_buf; 3869 arch_spin_lock(&trace_cmdline_lock);
3870 savedcmd_temp = savedcmd;
3871 savedcmd = s;
3872 arch_spin_unlock(&trace_cmdline_lock);
3873 free_saved_cmdlines_buffer(savedcmd_temp);
3730 3874
3731 for (i = 0; i < SAVED_CMDLINES; i++) { 3875 return 0;
3732 int r; 3876}
3733 3877
3734 pid = map_cmdline_to_pid[i]; 3878static ssize_t
3735 if (pid == -1 || pid == NO_CMDLINE_MAP) 3879tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3736 continue; 3880 size_t cnt, loff_t *ppos)
3881{
3882 unsigned long val;
3883 int ret;
3737 3884
3738 trace_find_cmdline(pid, buf_comm); 3885 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3739 r = sprintf(buf, "%d %s\n", pid, buf_comm); 3886 if (ret)
3740 buf += r; 3887 return ret;
3741 len += r;
3742 }
3743 3888
3744 len = simple_read_from_buffer(ubuf, cnt, ppos, 3889 /* must have at least 1 entry and no more than PID_MAX_DEFAULT */
3745 file_buf, len); 3890 if (!val || val > PID_MAX_DEFAULT)
3891 return -EINVAL;
3746 3892
3747 kfree(file_buf); 3893 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3748 kfree(buf_comm); 3894 if (ret < 0)
3895 return ret;
3749 3896
3750 return len; 3897 *ppos += cnt;
3898
3899 return cnt;
3751} 3900}
3752 3901
3753static const struct file_operations tracing_saved_cmdlines_fops = { 3902static const struct file_operations tracing_saved_cmdlines_size_fops = {
3754 .open = tracing_open_generic, 3903 .open = tracing_open_generic,
3755 .read = tracing_saved_cmdlines_read, 3904 .read = tracing_saved_cmdlines_size_read,
3756 .llseek = generic_file_llseek, 3905 .write = tracing_saved_cmdlines_size_write,
3757}; 3906};
3758 3907
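A minimal userspace sketch of driving the new knob, assuming debugfs is mounted at the conventional /sys/kernel/debug (adjust the path otherwise):

	#include <stdio.h>
	#include <stdlib.h>

	/* Hedged sketch: read, then resize, the saved-cmdlines cache. */
	int main(void)
	{
		const char *path =
			"/sys/kernel/debug/tracing/saved_cmdlines_size";
		unsigned int size;
		FILE *f;

		f = fopen(path, "r");
		if (!f) {
			perror(path);
			return EXIT_FAILURE;
		}
		if (fscanf(f, "%u", &size) == 1)
			printf("current: %u entries\n", size);
		fclose(f);

		f = fopen(path, "w");
		if (!f) {
			perror(path);
			return EXIT_FAILURE;
		}
		fprintf(f, "1024\n");	/* accepted range is 1..PID_MAX_DEFAULT */
		fclose(f);
		return 0;
	}

The kernel side is careful about ordering: tracing_resize_saved_cmdlines() allocates the new buffer first, swaps the savedcmd pointer under trace_cmdline_lock, and frees the old buffer only after dropping the lock, so recording never sees a half-built table.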
3759static ssize_t 3908static ssize_t
@@ -4225,25 +4374,6 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4225 return trace_poll(iter, filp, poll_table); 4374 return trace_poll(iter, filp, poll_table);
4226} 4375}
4227 4376
4228/*
4229 * This is a make-shift waitqueue.
4230 * A tracer might use this callback on some rare cases:
4231 *
4232 * 1) the current tracer might hold the runqueue lock when it wakes up
4233 * a reader, hence a deadlock (sched, function, and function graph tracers)
4234 * 2) the function tracers, trace all functions, we don't want
4235 * the overhead of calling wake_up and friends
4236 * (and tracing them too)
4237 *
4238 * Anyway, this is really very primitive wakeup.
4239 */
4240void poll_wait_pipe(struct trace_iterator *iter)
4241{
4242 set_current_state(TASK_INTERRUPTIBLE);
4243 /* sleep for 100 msecs, and try again. */
4244 schedule_timeout(HZ / 10);
4245}
4246
4247/* Must be called with trace_types_lock mutex held. */ 4377/* Must be called with trace_types_lock mutex held. */
4248static int tracing_wait_pipe(struct file *filp) 4378static int tracing_wait_pipe(struct file *filp)
4249{ 4379{
@@ -4255,15 +4385,6 @@ static int tracing_wait_pipe(struct file *filp)
4255 return -EAGAIN; 4385 return -EAGAIN;
4256 } 4386 }
4257 4387
4258 mutex_unlock(&iter->mutex);
4259
4260 iter->trace->wait_pipe(iter);
4261
4262 mutex_lock(&iter->mutex);
4263
4264 if (signal_pending(current))
4265 return -EINTR;
4266
4267 /* 4388 /*
4268 * We block until we read something and tracing is disabled. 4389 * We block until we read something and tracing is disabled.
4269 * We still block if tracing is disabled, but we have never 4390 * We still block if tracing is disabled, but we have never
@@ -4275,6 +4396,15 @@ static int tracing_wait_pipe(struct file *filp)
4275 */ 4396 */
4276 if (!tracing_is_on() && iter->pos) 4397 if (!tracing_is_on() && iter->pos)
4277 break; 4398 break;
4399
4400 mutex_unlock(&iter->mutex);
4401
4402 wait_on_pipe(iter);
4403
4404 mutex_lock(&iter->mutex);
4405
4406 if (signal_pending(current))
4407 return -EINTR;
4278 } 4408 }
4279 4409
4280 return 1; 4410 return 1;
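Relocating the unlock/wait/relock below the exit checks means tracing_wait_pipe() now evaluates its break conditions before sleeping, so a reader that already has data, or whose tracing was just turned off, returns instead of taking one more trip through wait_on_pipe().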
@@ -5197,7 +5327,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
5197 goto out_unlock; 5327 goto out_unlock;
5198 } 5328 }
5199 mutex_unlock(&trace_types_lock); 5329 mutex_unlock(&trace_types_lock);
5200 iter->trace->wait_pipe(iter); 5330 wait_on_pipe(iter);
5201 mutex_lock(&trace_types_lock); 5331 mutex_lock(&trace_types_lock);
5202 if (signal_pending(current)) { 5332 if (signal_pending(current)) {
5203 size = -EINTR; 5333 size = -EINTR;
@@ -5408,7 +5538,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5408 goto out; 5538 goto out;
5409 } 5539 }
5410 mutex_unlock(&trace_types_lock); 5540 mutex_unlock(&trace_types_lock);
5411 iter->trace->wait_pipe(iter); 5541 wait_on_pipe(iter);
5412 mutex_lock(&trace_types_lock); 5542 mutex_lock(&trace_types_lock);
5413 if (signal_pending(current)) { 5543 if (signal_pending(current)) {
5414 ret = -EINTR; 5544 ret = -EINTR;
@@ -6102,6 +6232,25 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
6102 return 0; 6232 return 0;
6103} 6233}
6104 6234
6235static void free_trace_buffers(struct trace_array *tr)
6236{
6237 if (!tr)
6238 return;
6239
6240 if (tr->trace_buffer.buffer) {
6241 ring_buffer_free(tr->trace_buffer.buffer);
6242 tr->trace_buffer.buffer = NULL;
6243 free_percpu(tr->trace_buffer.data);
6244 }
6245
6246#ifdef CONFIG_TRACER_MAX_TRACE
6247 if (tr->max_buffer.buffer) {
6248 ring_buffer_free(tr->max_buffer.buffer);
6249 tr->max_buffer.buffer = NULL;
6250 }
6251#endif
6252}
6253
6105static int new_instance_create(const char *name) 6254static int new_instance_create(const char *name)
6106{ 6255{
6107 struct trace_array *tr; 6256 struct trace_array *tr;
@@ -6131,6 +6280,8 @@ static int new_instance_create(const char *name)
6131 6280
6132 raw_spin_lock_init(&tr->start_lock); 6281 raw_spin_lock_init(&tr->start_lock);
6133 6282
6283 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6284
6134 tr->current_trace = &nop_trace; 6285 tr->current_trace = &nop_trace;
6135 6286
6136 INIT_LIST_HEAD(&tr->systems); 6287 INIT_LIST_HEAD(&tr->systems);
@@ -6158,8 +6309,7 @@ static int new_instance_create(const char *name)
6158 return 0; 6309 return 0;
6159 6310
6160 out_free_tr: 6311 out_free_tr:
6161 if (tr->trace_buffer.buffer) 6312 free_trace_buffers(tr);
6162 ring_buffer_free(tr->trace_buffer.buffer);
6163 free_cpumask_var(tr->tracing_cpumask); 6313 free_cpumask_var(tr->tracing_cpumask);
6164 kfree(tr->name); 6314 kfree(tr->name);
6165 kfree(tr); 6315 kfree(tr);
@@ -6199,8 +6349,7 @@ static int instance_delete(const char *name)
6199 event_trace_del_tracer(tr); 6349 event_trace_del_tracer(tr);
6200 ftrace_destroy_function_files(tr); 6350 ftrace_destroy_function_files(tr);
6201 debugfs_remove_recursive(tr->dir); 6351 debugfs_remove_recursive(tr->dir);
6202 free_percpu(tr->trace_buffer.data); 6352 free_trace_buffers(tr);
6203 ring_buffer_free(tr->trace_buffer.buffer);
6204 6353
6205 kfree(tr->name); 6354 kfree(tr->name);
6206 kfree(tr); 6355 kfree(tr);
@@ -6328,6 +6477,11 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
6328 trace_create_file("tracing_on", 0644, d_tracer, 6477 trace_create_file("tracing_on", 0644, d_tracer,
6329 tr, &rb_simple_fops); 6478 tr, &rb_simple_fops);
6330 6479
6480#ifdef CONFIG_TRACER_MAX_TRACE
6481 trace_create_file("tracing_max_latency", 0644, d_tracer,
6482 &tr->max_latency, &tracing_max_lat_fops);
6483#endif
6484
6331 if (ftrace_create_function_files(tr, d_tracer)) 6485 if (ftrace_create_function_files(tr, d_tracer))
6332 WARN(1, "Could not allocate function filter files"); 6486 WARN(1, "Could not allocate function filter files");
6333 6487
@@ -6353,11 +6507,6 @@ static __init int tracer_init_debugfs(void)
6353 6507
6354 init_tracer_debugfs(&global_trace, d_tracer); 6508 init_tracer_debugfs(&global_trace, d_tracer);
6355 6509
6356#ifdef CONFIG_TRACER_MAX_TRACE
6357 trace_create_file("tracing_max_latency", 0644, d_tracer,
6358 &tracing_max_latency, &tracing_max_lat_fops);
6359#endif
6360
6361 trace_create_file("tracing_thresh", 0644, d_tracer, 6510 trace_create_file("tracing_thresh", 0644, d_tracer,
6362 &tracing_thresh, &tracing_max_lat_fops); 6511 &tracing_thresh, &tracing_max_lat_fops);
6363 6512
@@ -6367,6 +6516,9 @@ static __init int tracer_init_debugfs(void)
6367 trace_create_file("saved_cmdlines", 0444, d_tracer, 6516 trace_create_file("saved_cmdlines", 0444, d_tracer,
6368 NULL, &tracing_saved_cmdlines_fops); 6517 NULL, &tracing_saved_cmdlines_fops);
6369 6518
6519 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6520 NULL, &tracing_saved_cmdlines_size_fops);
6521
6370#ifdef CONFIG_DYNAMIC_FTRACE 6522#ifdef CONFIG_DYNAMIC_FTRACE
6371 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, 6523 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6372 &ftrace_update_tot_cnt, &tracing_dyn_info_fops); 6524 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
@@ -6603,18 +6755,19 @@ __init static int tracer_alloc_buffers(void)
6603 if (!temp_buffer) 6755 if (!temp_buffer)
6604 goto out_free_cpumask; 6756 goto out_free_cpumask;
6605 6757
6758 if (trace_create_savedcmd() < 0)
6759 goto out_free_temp_buffer;
6760
6606 /* TODO: make the number of buffers hot pluggable with CPUS */ 6761 /* TODO: make the number of buffers hot pluggable with CPUS */
6607 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) { 6762 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
6608 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); 6763 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
6609 WARN_ON(1); 6764 WARN_ON(1);
6610 goto out_free_temp_buffer; 6765 goto out_free_savedcmd;
6611 } 6766 }
6612 6767
6613 if (global_trace.buffer_disabled) 6768 if (global_trace.buffer_disabled)
6614 tracing_off(); 6769 tracing_off();
6615 6770
6616 trace_init_cmdlines();
6617
6618 if (trace_boot_clock) { 6771 if (trace_boot_clock) {
6619 ret = tracing_set_clock(&global_trace, trace_boot_clock); 6772 ret = tracing_set_clock(&global_trace, trace_boot_clock);
6620 if (ret < 0) 6773 if (ret < 0)
@@ -6629,6 +6782,10 @@ __init static int tracer_alloc_buffers(void)
6629 */ 6782 */
6630 global_trace.current_trace = &nop_trace; 6783 global_trace.current_trace = &nop_trace;
6631 6784
6785 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6786
6787 ftrace_init_global_array_ops(&global_trace);
6788
6632 register_tracer(&nop_trace); 6789 register_tracer(&nop_trace);
6633 6790
6634 /* All seems OK, enable tracing */ 6791 /* All seems OK, enable tracing */
@@ -6656,13 +6813,11 @@ __init static int tracer_alloc_buffers(void)
6656 6813
6657 return 0; 6814 return 0;
6658 6815
6816out_free_savedcmd:
6817 free_saved_cmdlines_buffer(savedcmd);
6659out_free_temp_buffer: 6818out_free_temp_buffer:
6660 ring_buffer_free(temp_buffer); 6819 ring_buffer_free(temp_buffer);
6661out_free_cpumask: 6820out_free_cpumask:
6662 free_percpu(global_trace.trace_buffer.data);
6663#ifdef CONFIG_TRACER_MAX_TRACE
6664 free_percpu(global_trace.max_buffer.data);
6665#endif
6666 free_cpumask_var(global_trace.tracing_cpumask); 6821 free_cpumask_var(global_trace.tracing_cpumask);
6667out_free_buffer_mask: 6822out_free_buffer_mask:
6668 free_cpumask_var(tracing_buffer_mask); 6823 free_cpumask_var(tracing_buffer_mask);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 2e29d7ba5a52..9e82551dd566 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -190,7 +190,22 @@ struct trace_array {
190 */ 190 */
191 struct trace_buffer max_buffer; 191 struct trace_buffer max_buffer;
192 bool allocated_snapshot; 192 bool allocated_snapshot;
193 unsigned long max_latency;
193#endif 194#endif
195 /*
196 * max_lock is used to protect the swapping of buffers
197 * when taking a max snapshot. The buffers themselves are
198 * protected by per_cpu spinlocks. But the action of the swap
199 * needs its own lock.
200 *
201 * This is defined as a arch_spinlock_t in order to help
202 * with performance when lockdep debugging is enabled.
203 *
204 * It is also used in other places outside the update_max_tr
205 * so it needs to be defined outside of the
206 * CONFIG_TRACER_MAX_TRACE.
207 */
208 arch_spinlock_t max_lock;
194 int buffer_disabled; 209 int buffer_disabled;
195#ifdef CONFIG_FTRACE_SYSCALLS 210#ifdef CONFIG_FTRACE_SYSCALLS
196 int sys_refcount_enter; 211 int sys_refcount_enter;
@@ -237,6 +252,9 @@ static inline struct trace_array *top_trace_array(void)
237{ 252{
238 struct trace_array *tr; 253 struct trace_array *tr;
239 254
255 if (list_empty(ftrace_trace_arrays.prev))
256 return NULL;
257
240 tr = list_entry(ftrace_trace_arrays.prev, 258 tr = list_entry(ftrace_trace_arrays.prev,
241 typeof(*tr), list); 259 typeof(*tr), list);
242 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL)); 260 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
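The new empty check leans on list_head layout: for an empty ftrace_trace_arrays the head's ->prev points back at the head itself, so list_empty(ftrace_trace_arrays.prev) is true exactly when nothing has been added, and callers now get NULL instead of a list_entry() cast of the bare head.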
@@ -323,7 +341,6 @@ struct tracer_flags {
323 * @stop: called when tracing is paused (echo 0 > tracing_enabled) 341 * @stop: called when tracing is paused (echo 0 > tracing_enabled)
324 * @open: called when the trace file is opened 342 * @open: called when the trace file is opened
325 * @pipe_open: called when the trace_pipe file is opened 343 * @pipe_open: called when the trace_pipe file is opened
326 * @wait_pipe: override how the user waits for traces on trace_pipe
327 * @close: called when the trace file is released 344 * @close: called when the trace file is released
328 * @pipe_close: called when the trace_pipe file is released 345 * @pipe_close: called when the trace_pipe file is released
329 * @read: override the default read callback on trace_pipe 346 * @read: override the default read callback on trace_pipe
@@ -342,7 +359,6 @@ struct tracer {
342 void (*stop)(struct trace_array *tr); 359 void (*stop)(struct trace_array *tr);
343 void (*open)(struct trace_iterator *iter); 360 void (*open)(struct trace_iterator *iter);
344 void (*pipe_open)(struct trace_iterator *iter); 361 void (*pipe_open)(struct trace_iterator *iter);
345 void (*wait_pipe)(struct trace_iterator *iter);
346 void (*close)(struct trace_iterator *iter); 362 void (*close)(struct trace_iterator *iter);
347 void (*pipe_close)(struct trace_iterator *iter); 363 void (*pipe_close)(struct trace_iterator *iter);
348 ssize_t (*read)(struct trace_iterator *iter, 364 ssize_t (*read)(struct trace_iterator *iter,
@@ -416,13 +432,7 @@ enum {
416 TRACE_FTRACE_IRQ_BIT, 432 TRACE_FTRACE_IRQ_BIT,
417 TRACE_FTRACE_SIRQ_BIT, 433 TRACE_FTRACE_SIRQ_BIT,
418 434
419 /* GLOBAL_BITs must be greater than FTRACE_BITs */ 435 /* INTERNAL_BITs must be greater than FTRACE_BITs */
420 TRACE_GLOBAL_BIT,
421 TRACE_GLOBAL_NMI_BIT,
422 TRACE_GLOBAL_IRQ_BIT,
423 TRACE_GLOBAL_SIRQ_BIT,
424
425 /* INTERNAL_BITs must be greater than GLOBAL_BITs */
426 TRACE_INTERNAL_BIT, 436 TRACE_INTERNAL_BIT,
427 TRACE_INTERNAL_NMI_BIT, 437 TRACE_INTERNAL_NMI_BIT,
428 TRACE_INTERNAL_IRQ_BIT, 438 TRACE_INTERNAL_IRQ_BIT,
@@ -449,9 +459,6 @@ enum {
449#define TRACE_FTRACE_START TRACE_FTRACE_BIT 459#define TRACE_FTRACE_START TRACE_FTRACE_BIT
450#define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1) 460#define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
451 461
452#define TRACE_GLOBAL_START TRACE_GLOBAL_BIT
453#define TRACE_GLOBAL_MAX ((1 << (TRACE_GLOBAL_START + TRACE_CONTEXT_BITS)) - 1)
454
455#define TRACE_LIST_START TRACE_INTERNAL_BIT 462#define TRACE_LIST_START TRACE_INTERNAL_BIT
456#define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1) 463#define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
457 464
@@ -560,8 +567,6 @@ void trace_init_global_iter(struct trace_iterator *iter);
560 567
561void tracing_iter_reset(struct trace_iterator *iter, int cpu); 568void tracing_iter_reset(struct trace_iterator *iter, int cpu);
562 569
563void poll_wait_pipe(struct trace_iterator *iter);
564
565void tracing_sched_switch_trace(struct trace_array *tr, 570void tracing_sched_switch_trace(struct trace_array *tr,
566 struct task_struct *prev, 571 struct task_struct *prev,
567 struct task_struct *next, 572 struct task_struct *next,
@@ -608,8 +613,6 @@ extern unsigned long nsecs_to_usecs(unsigned long nsecs);
608extern unsigned long tracing_thresh; 613extern unsigned long tracing_thresh;
609 614
610#ifdef CONFIG_TRACER_MAX_TRACE 615#ifdef CONFIG_TRACER_MAX_TRACE
611extern unsigned long tracing_max_latency;
612
613void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu); 616void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
614void update_max_tr_single(struct trace_array *tr, 617void update_max_tr_single(struct trace_array *tr,
615 struct task_struct *tsk, int cpu); 618 struct task_struct *tsk, int cpu);
@@ -724,6 +727,8 @@ extern unsigned long trace_flags;
724#define TRACE_GRAPH_PRINT_PROC 0x8 727#define TRACE_GRAPH_PRINT_PROC 0x8
725#define TRACE_GRAPH_PRINT_DURATION 0x10 728#define TRACE_GRAPH_PRINT_DURATION 0x10
726#define TRACE_GRAPH_PRINT_ABS_TIME 0x20 729#define TRACE_GRAPH_PRINT_ABS_TIME 0x20
730#define TRACE_GRAPH_PRINT_IRQS 0x40
731#define TRACE_GRAPH_PRINT_TAIL 0x80
727#define TRACE_GRAPH_PRINT_FILL_SHIFT 28 732#define TRACE_GRAPH_PRINT_FILL_SHIFT 28
728#define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT) 733#define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
729 734
@@ -823,6 +828,10 @@ extern int ftrace_is_dead(void);
823int ftrace_create_function_files(struct trace_array *tr, 828int ftrace_create_function_files(struct trace_array *tr,
824 struct dentry *parent); 829 struct dentry *parent);
825void ftrace_destroy_function_files(struct trace_array *tr); 830void ftrace_destroy_function_files(struct trace_array *tr);
831void ftrace_init_global_array_ops(struct trace_array *tr);
832void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
833void ftrace_reset_array_ops(struct trace_array *tr);
834int using_ftrace_ops_list_func(void);
826#else 835#else
827static inline int ftrace_trace_task(struct task_struct *task) 836static inline int ftrace_trace_task(struct task_struct *task)
828{ 837{
@@ -836,6 +845,11 @@ ftrace_create_function_files(struct trace_array *tr,
836 return 0; 845 return 0;
837} 846}
838static inline void ftrace_destroy_function_files(struct trace_array *tr) { } 847static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
848static inline __init void
849ftrace_init_global_array_ops(struct trace_array *tr) { }
850static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
851/* ftrace_func_t type is not defined, use macro instead of static inline */
852#define ftrace_init_array_ops(tr, func) do { } while (0)
839#endif /* CONFIG_FUNCTION_TRACER */ 853#endif /* CONFIG_FUNCTION_TRACER */
840 854
841#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE) 855#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
diff --git a/kernel/trace/trace_benchmark.c b/kernel/trace/trace_benchmark.c
new file mode 100644
index 000000000000..40a14cbcf8e0
--- /dev/null
+++ b/kernel/trace/trace_benchmark.c
@@ -0,0 +1,198 @@
1#include <linux/delay.h>
2#include <linux/module.h>
3#include <linux/kthread.h>
4#include <linux/trace_clock.h>
5
6#define CREATE_TRACE_POINTS
7#include "trace_benchmark.h"
8
9static struct task_struct *bm_event_thread;
10
11static char bm_str[BENCHMARK_EVENT_STRLEN] = "START";
12
13static u64 bm_total;
14static u64 bm_totalsq;
15static u64 bm_last;
16static u64 bm_max;
17static u64 bm_min;
18static u64 bm_first;
19static u64 bm_cnt;
20static u64 bm_stddev;
21static unsigned int bm_avg;
22static unsigned int bm_std;
23
24/*
25 * This gets called in a loop recording the time it took to write
26 * the tracepoint. What it writes is the time statistics of the last
27 * tracepoint write. As there is nothing to write the first time
28 * it simply writes "START". As the first write is cold cache and
29 * the rest is hot, we save off that time in bm_first and it is
30 * reported as "first", which is shown in the second write to the
31 * tracepoint. The "first" field is written within the statistics from
32 * then on but never changes.
33 */
34static void trace_do_benchmark(void)
35{
36 u64 start;
37 u64 stop;
38 u64 delta;
39 u64 stddev;
40 u64 seed;
41 u64 last_seed;
42 unsigned int avg;
43 unsigned int std = 0;
44
45 /* Only run if the tracepoint is actually active */
46 if (!trace_benchmark_event_enabled())
47 return;
48
49 local_irq_disable();
50 start = trace_clock_local();
51 trace_benchmark_event(bm_str);
52 stop = trace_clock_local();
53 local_irq_enable();
54
55 bm_cnt++;
56
57 delta = stop - start;
58
59 /*
60 * The first read is cold cached, keep it separate from the
61 * other calculations.
62 */
63 if (bm_cnt == 1) {
64 bm_first = delta;
65 scnprintf(bm_str, BENCHMARK_EVENT_STRLEN,
66 "first=%llu [COLD CACHED]", bm_first);
67 return;
68 }
69
70 bm_last = delta;
71
72 if (delta > bm_max)
73 bm_max = delta;
74 if (!bm_min || delta < bm_min)
75 bm_min = delta;
76
77 /*
78 * When bm_cnt is greater than UINT_MAX, it breaks the statistics
79 * accounting. Freeze the statistics when that happens.
80 * We should have enough data for the avg and stddev anyway.
81 */
82 if (bm_cnt > UINT_MAX) {
83 scnprintf(bm_str, BENCHMARK_EVENT_STRLEN,
84 "last=%llu first=%llu max=%llu min=%llu ** avg=%u std=%d std^2=%lld",
85 bm_last, bm_first, bm_max, bm_min, bm_avg, bm_std, bm_stddev);
86 return;
87 }
88
89 bm_total += delta;
90 bm_totalsq += delta * delta;
91
92
93 if (bm_cnt > 1) {
94 /*
95 * Apply the standard sum-of-squares formula for the sample variance:
96 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
97 */
98 stddev = (u64)bm_cnt * bm_totalsq - bm_total * bm_total;
99 do_div(stddev, (u32)bm_cnt);
100 do_div(stddev, (u32)bm_cnt - 1);
101 } else
102 stddev = 0;
103
104 delta = bm_total;
105 do_div(delta, bm_cnt);
106 avg = delta;
107
108 if (stddev > 0) {
109 int i = 0;
110 /*
111 * stddev is the square of the standard deviation but
112 * we want the actual number. Use the average
113 * as our seed to find the std.
114 *
115 * The next try is:
116 * x = (x + N/x) / 2
117 *
118 * Where N is the squared number to find the square
119 * root of.
120 */
121 seed = avg;
122 do {
123 last_seed = seed;
124 seed = stddev;
125 if (!last_seed)
126 break;
127 do_div(seed, last_seed);
128 seed += last_seed;
129 do_div(seed, 2);
130 } while (i++ < 10 && last_seed != seed);
131
132 std = seed;
133 }
134
135 scnprintf(bm_str, BENCHMARK_EVENT_STRLEN,
136 "last=%llu first=%llu max=%llu min=%llu avg=%u std=%d std^2=%lld",
137 bm_last, bm_first, bm_max, bm_min, avg, std, stddev);
138
139 bm_std = std;
140 bm_avg = avg;
141 bm_stddev = stddev;
142}
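The integer math above is easy to sanity-check outside the kernel. A hedged userspace sketch, substituting plain 64-bit division for do_div(), that reruns the variance formula and the Newton (Babylonian) square-root iteration over a few sample deltas:

	#include <stdio.h>
	#include <stdint.h>

	/* Hedged sketch of the benchmark's statistics, outside the kernel. */
	int main(void)
	{
		const uint64_t delta[] = { 110, 95, 102, 99, 104 };
		const uint64_t n = sizeof(delta) / sizeof(delta[0]);
		uint64_t total = 0, totalsq = 0;

		for (uint64_t i = 0; i < n; i++) {
			total += delta[i];
			totalsq += delta[i] * delta[i];
		}

		/* s^2 = (n * sum(x^2) - (sum x)^2) / (n * (n - 1)) */
		uint64_t var = (n * totalsq - total * total) / n / (n - 1);
		uint64_t avg = total / n;

		/* Newton's iteration x = (x + N/x) / 2, seeded with the average. */
		uint64_t seed = avg, last = 0;
		for (int i = 0; i < 10 && seed && seed != last; i++) {
			last = seed;
			seed = (last + var / last) / 2;
		}

		printf("avg=%llu std=%llu std^2=%llu\n",
		       (unsigned long long)avg, (unsigned long long)seed,
		       (unsigned long long)var);
		return 0;
	}

For the sample above this prints avg=102 std=5 std^2=31, converging in a handful of steps, which is why the kernel loop caps itself at ten iterations.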
143
144static int benchmark_event_kthread(void *arg)
145{
146 /* sleep a bit to make sure the tracepoint gets activated */
147 msleep(100);
148
149 while (!kthread_should_stop()) {
150
151 trace_do_benchmark();
152
153 /*
154 * We don't go to sleep, but let others
155 * run as well.
156 */
157 cond_resched();
158 }
159
160 return 0;
161}
162
163/*
164 * When the benchmark tracepoint is enabled, it calls this
165 * function and the thread that calls the tracepoint is created.
166 */
167void trace_benchmark_reg(void)
168{
169 bm_event_thread = kthread_run(benchmark_event_kthread,
170 NULL, "event_benchmark");
171 WARN_ON(!bm_event_thread);
172}
173
174/*
175 * When the benchmark tracepoint is disabled, it calls this
176 * function and the thread that calls the tracepoint is deleted
177 * and all the numbers are reset.
178 */
179void trace_benchmark_unreg(void)
180{
181 if (!bm_event_thread)
182 return;
183
184 kthread_stop(bm_event_thread);
185
186 strcpy(bm_str, "START");
187 bm_total = 0;
188 bm_totalsq = 0;
189 bm_last = 0;
190 bm_max = 0;
191 bm_min = 0;
192 bm_cnt = 0;
193 /* These don't need to be reset but reset them anyway */
194 bm_first = 0;
195 bm_std = 0;
196 bm_avg = 0;
197 bm_stddev = 0;
198}
diff --git a/kernel/trace/trace_benchmark.h b/kernel/trace/trace_benchmark.h
new file mode 100644
index 000000000000..3c1df1df4e29
--- /dev/null
+++ b/kernel/trace/trace_benchmark.h
@@ -0,0 +1,41 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM benchmark
3
4#if !defined(_TRACE_BENCHMARK_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_BENCHMARK_H
6
7#include <linux/tracepoint.h>
8
9extern void trace_benchmark_reg(void);
10extern void trace_benchmark_unreg(void);
11
12#define BENCHMARK_EVENT_STRLEN 128
13
14TRACE_EVENT_FN(benchmark_event,
15
16 TP_PROTO(const char *str),
17
18 TP_ARGS(str),
19
20 TP_STRUCT__entry(
21 __array( char, str, BENCHMARK_EVENT_STRLEN )
22 ),
23
24 TP_fast_assign(
25 memcpy(__entry->str, str, BENCHMARK_EVENT_STRLEN);
26 ),
27
28 TP_printk("%s", __entry->str),
29
30 trace_benchmark_reg, trace_benchmark_unreg
31);
32
33#endif /* _TRACE_BENCHMARK_H */
34
35#undef TRACE_INCLUDE_FILE
36#undef TRACE_INCLUDE_PATH
37#define TRACE_INCLUDE_PATH .
38#define TRACE_INCLUDE_FILE trace_benchmark
39
40/* This part must be outside protection */
41#include <trace/define_trace.h>
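Since the reg/unreg callbacks tie the worker thread's lifetime to the tracepoint, exercising the benchmark needs nothing beyond the usual event interface: assuming debugfs at /sys/kernel/debug, echo 1 > /sys/kernel/debug/tracing/events/benchmark/benchmark_event/enable starts the event_benchmark kthread, and writing 0 stops it and resets every counter back to the START state.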
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 3ddfd8f62c05..f99e0b3bca8c 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -574,6 +574,9 @@ int trace_set_clr_event(const char *system, const char *event, int set)
574{ 574{
575 struct trace_array *tr = top_trace_array(); 575 struct trace_array *tr = top_trace_array();
576 576
577 if (!tr)
578 return -ENODEV;
579
577 return __ftrace_set_clr_event(tr, NULL, system, event, set); 580 return __ftrace_set_clr_event(tr, NULL, system, event, set);
578} 581}
579EXPORT_SYMBOL_GPL(trace_set_clr_event); 582EXPORT_SYMBOL_GPL(trace_set_clr_event);
@@ -2065,6 +2068,9 @@ event_enable_func(struct ftrace_hash *hash,
2065 bool enable; 2068 bool enable;
2066 int ret; 2069 int ret;
2067 2070
2071 if (!tr)
2072 return -ENODEV;
2073
2068 /* hash funcs only work with set_ftrace_filter */ 2074 /* hash funcs only work with set_ftrace_filter */
2069 if (!enabled || !param) 2075 if (!enabled || !param)
2070 return -EINVAL; 2076 return -EINVAL;
@@ -2396,6 +2402,9 @@ static __init int event_trace_enable(void)
2396 char *token; 2402 char *token;
2397 int ret; 2403 int ret;
2398 2404
2405 if (!tr)
2406 return -ENODEV;
2407
2399 for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) { 2408 for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
2400 2409
2401 call = *iter; 2410 call = *iter;
@@ -2442,6 +2451,8 @@ static __init int event_trace_init(void)
2442 int ret; 2451 int ret;
2443 2452
2444 tr = top_trace_array(); 2453 tr = top_trace_array();
2454 if (!tr)
2455 return -ENODEV;
2445 2456
2446 d_tracer = tracing_init_dentry(); 2457 d_tracer = tracing_init_dentry();
2447 if (!d_tracer) 2458 if (!d_tracer)
@@ -2535,6 +2546,8 @@ static __init void event_trace_self_tests(void)
2535 int ret; 2546 int ret;
2536 2547
2537 tr = top_trace_array(); 2548 tr = top_trace_array();
2549 if (!tr)
2550 return;
2538 2551
2539 pr_info("Running tests on trace events:\n"); 2552 pr_info("Running tests on trace events:\n");
2540 2553
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index ffd56351b521..57f0ec962d2c 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -26,8 +26,6 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
26static void 26static void
27function_stack_trace_call(unsigned long ip, unsigned long parent_ip, 27function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
28 struct ftrace_ops *op, struct pt_regs *pt_regs); 28 struct ftrace_ops *op, struct pt_regs *pt_regs);
29static struct ftrace_ops trace_ops;
30static struct ftrace_ops trace_stack_ops;
31static struct tracer_flags func_flags; 29static struct tracer_flags func_flags;
32 30
33/* Our option */ 31/* Our option */
@@ -83,28 +81,24 @@ void ftrace_destroy_function_files(struct trace_array *tr)
83 81
84static int function_trace_init(struct trace_array *tr) 82static int function_trace_init(struct trace_array *tr)
85{ 83{
86 struct ftrace_ops *ops; 84 ftrace_func_t func;
87
88 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
89 /* There's only one global tr */
90 if (!trace_ops.private) {
91 trace_ops.private = tr;
92 trace_stack_ops.private = tr;
93 }
94 85
95 if (func_flags.val & TRACE_FUNC_OPT_STACK) 86 /*
96 ops = &trace_stack_ops; 87 * Instance trace_arrays get their ops allocated
97 else 88 * at instance creation, unless that
98 ops = &trace_ops; 89 * allocation failed.
99 tr->ops = ops; 90 */
100 } else if (!tr->ops) { 91 if (!tr->ops)
101 /*
102 * Instance trace_arrays get their ops allocated
103 * at instance creation. Unless it failed
104 * the allocation.
105 */
106 return -ENOMEM; 92 return -ENOMEM;
107 } 93
94 /* Currently only the global instance can do stack tracing */
95 if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
96 func_flags.val & TRACE_FUNC_OPT_STACK)
97 func = function_stack_trace_call;
98 else
99 func = function_trace_call;
100
101 ftrace_init_array_ops(tr, func);
108 102
109 tr->trace_buffer.cpu = get_cpu(); 103 tr->trace_buffer.cpu = get_cpu();
110 put_cpu(); 104 put_cpu();
@@ -118,6 +112,7 @@ static void function_trace_reset(struct trace_array *tr)
118{ 112{
119 tracing_stop_function_trace(tr); 113 tracing_stop_function_trace(tr);
120 tracing_stop_cmdline_record(); 114 tracing_stop_cmdline_record();
115 ftrace_reset_array_ops(tr);
121} 116}
122 117
123static void function_trace_start(struct trace_array *tr) 118static void function_trace_start(struct trace_array *tr)
@@ -199,18 +194,6 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
199 local_irq_restore(flags); 194 local_irq_restore(flags);
200} 195}
201 196
202static struct ftrace_ops trace_ops __read_mostly =
203{
204 .func = function_trace_call,
205 .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
206};
207
208static struct ftrace_ops trace_stack_ops __read_mostly =
209{
210 .func = function_stack_trace_call,
211 .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
212};
213
214static struct tracer_opt func_opts[] = { 197static struct tracer_opt func_opts[] = {
215#ifdef CONFIG_STACKTRACE 198#ifdef CONFIG_STACKTRACE
216 { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) }, 199 { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
@@ -248,10 +231,10 @@ func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
248 unregister_ftrace_function(tr->ops); 231 unregister_ftrace_function(tr->ops);
249 232
250 if (set) { 233 if (set) {
251 tr->ops = &trace_stack_ops; 234 tr->ops->func = function_stack_trace_call;
252 register_ftrace_function(tr->ops); 235 register_ftrace_function(tr->ops);
253 } else { 236 } else {
254 tr->ops = &trace_ops; 237 tr->ops->func = function_trace_call;
255 register_ftrace_function(tr->ops); 238 register_ftrace_function(tr->ops);
256 } 239 }
257 240
@@ -269,7 +252,6 @@ static struct tracer function_trace __tracer_data =
269 .init = function_trace_init, 252 .init = function_trace_init,
270 .reset = function_trace_reset, 253 .reset = function_trace_reset,
271 .start = function_trace_start, 254 .start = function_trace_start,
272 .wait_pipe = poll_wait_pipe,
273 .flags = &func_flags, 255 .flags = &func_flags,
274 .set_flag = func_set_flag, 256 .set_flag = func_set_flag,
275 .allow_instances = true, 257 .allow_instances = true,
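Note: the function tracer now binds its callback through ftrace_init_array_ops() and tears it down with ftrace_reset_array_ops(), both added to kernel/trace/ftrace.c elsewhere in this series. A rough sketch of what the pairing is expected to do for a per-instance trace_array (simplified; the real helpers also handle pid filtering on the global instance):

	/* Illustrative sketch only -- not the code from this series */
	void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
	{
		/* tr->ops was allocated when the instance was created */
		tr->ops->func = func;
		tr->ops->private = tr;
	}

	void ftrace_reset_array_ops(struct trace_array *tr)
	{
		tr->ops->func = ftrace_stub;
	}

This is what lets func_set_flag() above swap just the callback in place instead of swapping whole ftrace_ops structures, and it is why the FTRACE_OPS_FL_GLOBAL static ops can be deleted.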
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index deff11200261..4de3e57f723c 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -38,15 +38,6 @@ struct fgraph_data {
 
 #define TRACE_GRAPH_INDENT	2
 
-/* Flag options */
-#define TRACE_GRAPH_PRINT_OVERRUN	0x1
-#define TRACE_GRAPH_PRINT_CPU		0x2
-#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
-#define TRACE_GRAPH_PRINT_PROC		0x8
-#define TRACE_GRAPH_PRINT_DURATION	0x10
-#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
-#define TRACE_GRAPH_PRINT_IRQS		0x40
-
 static unsigned int max_depth;
 
 static struct tracer_opt trace_opts[] = {
@@ -64,11 +55,13 @@ static struct tracer_opt trace_opts[] = {
 	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
 	/* Display interrupts */
 	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
+	/* Display function name after trailing } */
+	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
 	{ } /* Empty entry */
 };
 
 static struct tracer_flags tracer_flags = {
-	/* Don't display overruns and proc by default */
+	/* Don't display overruns, proc, or tail by default */
 	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
 	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
 	.opts = trace_opts
@@ -1176,9 +1169,10 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 	 * If the return function does not have a matching entry,
 	 * then the entry was lost. Instead of just printing
 	 * the '}' and letting the user guess what function this
-	 * belongs to, write out the function name.
+	 * belongs to, write out the function name. Always do
+	 * that if the funcgraph-tail option is enabled.
 	 */
-	if (func_match) {
+	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL)) {
 		ret = trace_seq_puts(s, "}\n");
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
@@ -1505,7 +1499,6 @@ static struct tracer graph_trace __tracer_data = {
 	.pipe_open = graph_trace_open,
 	.close = graph_trace_close,
 	.pipe_close = graph_trace_close,
-	.wait_pipe = poll_wait_pipe,
 	.init = graph_trace_init,
 	.reset = graph_trace_reset,
 	.print_line = print_graph_function,
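Note: the branch not visible in the print_graph_return() hunk is the pre-existing fallback a few lines below it, which the new funcgraph-tail option now reuses for every matched return, roughly:

	} else {
		/* Taken when the entry was lost, and now also
		 * whenever TRACE_GRAPH_PRINT_TAIL is set. */
		ret = trace_seq_printf(s, "} /* %ps */\n",
				       (void *)trace->func);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}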
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 8ff02cbb892f..9bb104f748d0 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -151,12 +151,6 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
 
 	atomic_dec(&data->disabled);
 }
-
-static struct ftrace_ops trace_ops __read_mostly =
-{
-	.func = irqsoff_tracer_call,
-	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
-};
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -176,7 +170,7 @@ irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 	for_each_possible_cpu(cpu)
 		per_cpu(tracing_cpu, cpu) = 0;
 
-	tracing_max_latency = 0;
+	tr->max_latency = 0;
 	tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);
 
 	return start_irqsoff_tracer(irqsoff_trace, set);
@@ -303,13 +297,13 @@ static void irqsoff_print_header(struct seq_file *s)
 /*
  * Should this new latency be reported/recorded?
  */
-static int report_latency(cycle_t delta)
+static int report_latency(struct trace_array *tr, cycle_t delta)
 {
 	if (tracing_thresh) {
 		if (delta < tracing_thresh)
 			return 0;
 	} else {
-		if (delta <= tracing_max_latency)
+		if (delta <= tr->max_latency)
 			return 0;
 	}
 	return 1;
@@ -333,13 +327,13 @@ check_critical_timing(struct trace_array *tr,
 
 	pc = preempt_count();
 
-	if (!report_latency(delta))
+	if (!report_latency(tr, delta))
 		goto out;
 
 	raw_spin_lock_irqsave(&max_trace_lock, flags);
 
 	/* check if we are still the max latency */
-	if (!report_latency(delta))
+	if (!report_latency(tr, delta))
 		goto out_unlock;
 
 	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
@@ -352,7 +346,7 @@ check_critical_timing(struct trace_array *tr,
 	data->critical_end = parent_ip;
 
 	if (likely(!is_tracing_stopped())) {
-		tracing_max_latency = delta;
+		tr->max_latency = delta;
 		update_max_tr_single(tr, current, cpu);
 	}
 
@@ -531,7 +525,7 @@ void trace_preempt_off(unsigned long a0, unsigned long a1)
 }
 #endif /* CONFIG_PREEMPT_TRACER */
 
-static int register_irqsoff_function(int graph, int set)
+static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
 {
 	int ret;
 
@@ -543,7 +537,7 @@ static int register_irqsoff_function(int graph, int set)
 		ret = register_ftrace_graph(&irqsoff_graph_return,
 					    &irqsoff_graph_entry);
 	else
-		ret = register_ftrace_function(&trace_ops);
+		ret = register_ftrace_function(tr->ops);
 
 	if (!ret)
 		function_enabled = true;
@@ -551,7 +545,7 @@ static int register_irqsoff_function(int graph, int set)
 	return ret;
 }
 
-static void unregister_irqsoff_function(int graph)
+static void unregister_irqsoff_function(struct trace_array *tr, int graph)
 {
 	if (!function_enabled)
 		return;
@@ -559,17 +553,17 @@ static void unregister_irqsoff_function(int graph)
 	if (graph)
 		unregister_ftrace_graph();
 	else
-		unregister_ftrace_function(&trace_ops);
+		unregister_ftrace_function(tr->ops);
 
 	function_enabled = false;
 }
 
-static void irqsoff_function_set(int set)
+static void irqsoff_function_set(struct trace_array *tr, int set)
 {
 	if (set)
-		register_irqsoff_function(is_graph(), 1);
+		register_irqsoff_function(tr, is_graph(), 1);
 	else
-		unregister_irqsoff_function(is_graph());
+		unregister_irqsoff_function(tr, is_graph());
 }
 
@@ -577,7 +571,7 @@ static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
 	struct tracer *tracer = tr->current_trace;
 
 	if (mask & TRACE_ITER_FUNCTION)
-		irqsoff_function_set(set);
+		irqsoff_function_set(tr, set);
 
 	return trace_keep_overwrite(tracer, mask, set);
 }
@@ -586,7 +580,7 @@ static int start_irqsoff_tracer(struct trace_array *tr, int graph)
 {
 	int ret;
 
-	ret = register_irqsoff_function(graph, 0);
+	ret = register_irqsoff_function(tr, graph, 0);
 
 	if (!ret && tracing_is_enabled())
 		tracer_enabled = 1;
@@ -600,25 +594,37 @@ static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
 {
 	tracer_enabled = 0;
 
-	unregister_irqsoff_function(graph);
+	unregister_irqsoff_function(tr, graph);
 }
 
-static void __irqsoff_tracer_init(struct trace_array *tr)
+static bool irqsoff_busy;
+
+static int __irqsoff_tracer_init(struct trace_array *tr)
 {
+	if (irqsoff_busy)
+		return -EBUSY;
+
 	save_flags = trace_flags;
 
 	/* non overwrite screws up the latency tracers */
 	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
 	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
 
-	tracing_max_latency = 0;
+	tr->max_latency = 0;
 	irqsoff_trace = tr;
 	/* make sure that the tracer is visible */
 	smp_wmb();
 	tracing_reset_online_cpus(&tr->trace_buffer);
 
-	if (start_irqsoff_tracer(tr, is_graph()))
+	ftrace_init_array_ops(tr, irqsoff_tracer_call);
+
+	/* Only toplevel instance supports graph tracing */
+	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
+				      is_graph())))
 		printk(KERN_ERR "failed to start irqsoff tracer\n");
+
+	irqsoff_busy = true;
+	return 0;
 }
 
 static void irqsoff_tracer_reset(struct trace_array *tr)
@@ -630,6 +636,9 @@ static void irqsoff_tracer_reset(struct trace_array *tr)
 
 	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
 	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
+	ftrace_reset_array_ops(tr);
+
+	irqsoff_busy = false;
 }
 
 static void irqsoff_tracer_start(struct trace_array *tr)
@@ -647,8 +656,7 @@ static int irqsoff_tracer_init(struct trace_array *tr)
 {
 	trace_type = TRACER_IRQS_OFF;
 
-	__irqsoff_tracer_init(tr);
-	return 0;
+	return __irqsoff_tracer_init(tr);
 }
 static struct tracer irqsoff_tracer __read_mostly =
 {
@@ -668,6 +676,7 @@ static struct tracer irqsoff_tracer __read_mostly =
 #endif
 	.open = irqsoff_trace_open,
 	.close = irqsoff_trace_close,
+	.allow_instances = true,
 	.use_max_tr = true,
 };
 # define register_irqsoff(trace) register_tracer(&trace)
@@ -680,8 +689,7 @@ static int preemptoff_tracer_init(struct trace_array *tr)
 {
 	trace_type = TRACER_PREEMPT_OFF;
 
-	__irqsoff_tracer_init(tr);
-	return 0;
+	return __irqsoff_tracer_init(tr);
 }
 
 static struct tracer preemptoff_tracer __read_mostly =
@@ -702,6 +710,7 @@ static struct tracer preemptoff_tracer __read_mostly =
 #endif
 	.open = irqsoff_trace_open,
 	.close = irqsoff_trace_close,
+	.allow_instances = true,
 	.use_max_tr = true,
 };
 # define register_preemptoff(trace) register_tracer(&trace)
@@ -716,8 +725,7 @@ static int preemptirqsoff_tracer_init(struct trace_array *tr)
 {
 	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;
 
-	__irqsoff_tracer_init(tr);
-	return 0;
+	return __irqsoff_tracer_init(tr);
 }
 
 static struct tracer preemptirqsoff_tracer __read_mostly =
@@ -738,6 +746,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
 #endif
 	.open = irqsoff_trace_open,
 	.close = irqsoff_trace_close,
+	.allow_instances = true,
 	.use_max_tr = true,
 };
 
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 903ae28962be..ef2fba1f46b5 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1377,6 +1377,9 @@ static __init int kprobe_trace_self_tests_init(void)
 	struct trace_kprobe *tk;
 	struct ftrace_event_file *file;
 
+	if (tracing_is_disabled())
+		return -ENODEV;
+
 	target = kprobe_trace_selftest_target;
 
 	pr_info("Testing kprobe tracing: ");
diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c
index 69a5cc94c01a..fcf0a9e48916 100644
--- a/kernel/trace/trace_nop.c
+++ b/kernel/trace/trace_nop.c
@@ -91,7 +91,6 @@ struct tracer nop_trace __read_mostly =
 	.name = "nop",
 	.init = nop_trace_init,
 	.reset = nop_trace_reset,
-	.wait_pipe = poll_wait_pipe,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest = trace_selftest_startup_nop,
 #endif
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index a436de18aa99..f3dad80c20b2 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -126,6 +126,34 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
 EXPORT_SYMBOL_GPL(trace_seq_printf);
 
 /**
+ * trace_seq_bitmask - write a bitmask (an array of longs) as ASCII output
+ * @s: trace sequence descriptor
+ * @maskp: points to an array of unsigned longs that represent a bitmask
+ * @nmaskbits: the number of bits that are valid in @maskp
+ *
+ * It returns 0 if the output would overrun the buffer's free
+ * space, 1 otherwise.
+ *
+ * Writes an ASCII representation of the bitmask into @s.
+ */
+int
+trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
+		  int nmaskbits)
+{
+	int len = (PAGE_SIZE - 1) - s->len;
+	int ret;
+
+	if (s->full || !len)
+		return 0;
+
+	ret = bitmap_scnprintf(s->buffer + s->len, len, maskp, nmaskbits);
+	s->len += ret;
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(trace_seq_bitmask);
+
+/**
  * trace_seq_vprintf - sequence printing of trace information
  * @s: trace sequence descriptor
  * @fmt: printf format string
@@ -399,6 +427,19 @@ EXPORT_SYMBOL(ftrace_print_symbols_seq_u64);
 #endif
 
 const char *
+ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
+			 unsigned int bitmask_size)
+{
+	const char *ret = p->buffer + p->len;
+
+	trace_seq_bitmask(p, bitmask_ptr, bitmask_size * 8);
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ftrace_print_bitmask_seq);
+
+const char *
 ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
 {
 	int i;
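Note: ftrace_print_bitmask_seq() is the output half of the new __bitmask() trace-event macros added by this series. A hypothetical event using them (the event and field names here are invented for illustration):

	TRACE_EVENT(sample_cpus,

		TP_PROTO(const struct cpumask *mask),

		TP_ARGS(mask),

		TP_STRUCT__entry(
			__bitmask(cpus, num_possible_cpus())
		),

		TP_fast_assign(
			__assign_bitmask(cpus, cpumask_bits(mask),
					 num_possible_cpus());
		),

		/* __get_bitmask() expands into a call that ends up in
		 * ftrace_print_bitmask_seq() */
		TP_printk("cpus=%s", __get_bitmask(cpus))
	);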
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index e14da5e97a69..19bd8928ce94 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -130,15 +130,9 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
 	atomic_dec(&data->disabled);
 	preempt_enable_notrace();
 }
-
-static struct ftrace_ops trace_ops __read_mostly =
-{
-	.func = wakeup_tracer_call,
-	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
-};
 #endif /* CONFIG_FUNCTION_TRACER */
 
-static int register_wakeup_function(int graph, int set)
+static int register_wakeup_function(struct trace_array *tr, int graph, int set)
 {
 	int ret;
 
@@ -150,7 +144,7 @@ static int register_wakeup_function(int graph, int set)
 		ret = register_ftrace_graph(&wakeup_graph_return,
 					    &wakeup_graph_entry);
 	else
-		ret = register_ftrace_function(&trace_ops);
+		ret = register_ftrace_function(tr->ops);
 
 	if (!ret)
 		function_enabled = true;
@@ -158,7 +152,7 @@ static int register_wakeup_function(int graph, int set)
 	return ret;
 }
 
-static void unregister_wakeup_function(int graph)
+static void unregister_wakeup_function(struct trace_array *tr, int graph)
 {
 	if (!function_enabled)
 		return;
@@ -166,17 +160,17 @@ static void unregister_wakeup_function(int graph)
 	if (graph)
 		unregister_ftrace_graph();
 	else
-		unregister_ftrace_function(&trace_ops);
+		unregister_ftrace_function(tr->ops);
 
 	function_enabled = false;
 }
 
-static void wakeup_function_set(int set)
+static void wakeup_function_set(struct trace_array *tr, int set)
 {
 	if (set)
-		register_wakeup_function(is_graph(), 1);
+		register_wakeup_function(tr, is_graph(), 1);
 	else
-		unregister_wakeup_function(is_graph());
+		unregister_wakeup_function(tr, is_graph());
 }
 
@@ -184,16 +178,16 @@ static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
 	struct tracer *tracer = tr->current_trace;
 
 	if (mask & TRACE_ITER_FUNCTION)
-		wakeup_function_set(set);
+		wakeup_function_set(tr, set);
 
 	return trace_keep_overwrite(tracer, mask, set);
 }
 
-static int start_func_tracer(int graph)
+static int start_func_tracer(struct trace_array *tr, int graph)
 {
 	int ret;
 
-	ret = register_wakeup_function(graph, 0);
+	ret = register_wakeup_function(tr, graph, 0);
 
 	if (!ret && tracing_is_enabled())
 		tracer_enabled = 1;
@@ -203,11 +197,11 @@ static int start_func_tracer(int graph)
 	return ret;
 }
 
-static void stop_func_tracer(int graph)
+static void stop_func_tracer(struct trace_array *tr, int graph)
 {
 	tracer_enabled = 0;
 
-	unregister_wakeup_function(graph);
+	unregister_wakeup_function(tr, graph);
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -221,12 +215,12 @@ wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 	if (!(is_graph() ^ set))
 		return 0;
 
-	stop_func_tracer(!set);
+	stop_func_tracer(tr, !set);
 
 	wakeup_reset(wakeup_trace);
-	tracing_max_latency = 0;
+	tr->max_latency = 0;
 
-	return start_func_tracer(set);
+	return start_func_tracer(tr, set);
 }
 
 static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
@@ -350,13 +344,13 @@ static void wakeup_print_header(struct seq_file *s)
 /*
  * Should this new latency be reported/recorded?
  */
-static int report_latency(cycle_t delta)
+static int report_latency(struct trace_array *tr, cycle_t delta)
 {
 	if (tracing_thresh) {
 		if (delta < tracing_thresh)
 			return 0;
 	} else {
-		if (delta <= tracing_max_latency)
+		if (delta <= tr->max_latency)
 			return 0;
 	}
 	return 1;
@@ -424,11 +418,11 @@ probe_wakeup_sched_switch(void *ignore,
 	T1 = ftrace_now(cpu);
 	delta = T1-T0;
 
-	if (!report_latency(delta))
+	if (!report_latency(wakeup_trace, delta))
 		goto out_unlock;
 
 	if (likely(!is_tracing_stopped())) {
-		tracing_max_latency = delta;
+		wakeup_trace->max_latency = delta;
 		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
 	}
 
@@ -587,7 +581,7 @@ static void start_wakeup_tracer(struct trace_array *tr)
 	 */
 	smp_wmb();
 
-	if (start_func_tracer(is_graph()))
+	if (start_func_tracer(tr, is_graph()))
 		printk(KERN_ERR "failed to start wakeup tracer\n");
 
 	return;
@@ -600,13 +594,15 @@ fail_deprobe:
 static void stop_wakeup_tracer(struct trace_array *tr)
 {
 	tracer_enabled = 0;
-	stop_func_tracer(is_graph());
+	stop_func_tracer(tr, is_graph());
 	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
 	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
 	unregister_trace_sched_wakeup(probe_wakeup, NULL);
 	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
 }
 
+static bool wakeup_busy;
+
 static int __wakeup_tracer_init(struct trace_array *tr)
 {
 	save_flags = trace_flags;
@@ -615,14 +611,20 @@ static int __wakeup_tracer_init(struct trace_array *tr)
 	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
 	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
 
-	tracing_max_latency = 0;
+	tr->max_latency = 0;
 	wakeup_trace = tr;
+	ftrace_init_array_ops(tr, wakeup_tracer_call);
 	start_wakeup_tracer(tr);
+
+	wakeup_busy = true;
 	return 0;
 }
 
 static int wakeup_tracer_init(struct trace_array *tr)
 {
+	if (wakeup_busy)
+		return -EBUSY;
+
 	wakeup_dl = 0;
 	wakeup_rt = 0;
 	return __wakeup_tracer_init(tr);
@@ -630,6 +632,9 @@ static int wakeup_tracer_init(struct trace_array *tr)
 
 static int wakeup_rt_tracer_init(struct trace_array *tr)
 {
+	if (wakeup_busy)
+		return -EBUSY;
+
 	wakeup_dl = 0;
 	wakeup_rt = 1;
 	return __wakeup_tracer_init(tr);
@@ -637,6 +642,9 @@ static int wakeup_rt_tracer_init(struct trace_array *tr)
 
 static int wakeup_dl_tracer_init(struct trace_array *tr)
 {
+	if (wakeup_busy)
+		return -EBUSY;
+
 	wakeup_dl = 1;
 	wakeup_rt = 0;
 	return __wakeup_tracer_init(tr);
@@ -653,6 +661,8 @@ static void wakeup_tracer_reset(struct trace_array *tr)
 
 	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
 	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
+	ftrace_reset_array_ops(tr);
+	wakeup_busy = false;
 }
 
 static void wakeup_tracer_start(struct trace_array *tr)
@@ -684,6 +694,7 @@ static struct tracer wakeup_tracer __read_mostly =
 #endif
 	.open = wakeup_trace_open,
 	.close = wakeup_trace_close,
+	.allow_instances = true,
 	.use_max_tr = true,
 };
 
@@ -694,7 +705,6 @@ static struct tracer wakeup_rt_tracer __read_mostly =
 	.reset = wakeup_tracer_reset,
 	.start = wakeup_tracer_start,
 	.stop = wakeup_tracer_stop,
-	.wait_pipe = poll_wait_pipe,
 	.print_max = true,
 	.print_header = wakeup_print_header,
 	.print_line = wakeup_print_line,
@@ -706,6 +716,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
 #endif
 	.open = wakeup_trace_open,
 	.close = wakeup_trace_close,
+	.allow_instances = true,
 	.use_max_tr = true,
 };
 
@@ -716,7 +727,6 @@ static struct tracer wakeup_dl_tracer __read_mostly =
 	.reset = wakeup_tracer_reset,
 	.start = wakeup_tracer_start,
 	.stop = wakeup_tracer_stop,
-	.wait_pipe = poll_wait_pipe,
 	.print_max = true,
 	.print_header = wakeup_print_header,
 	.print_line = wakeup_print_line,
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index e98fca60974f..5ef60499dc8e 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -65,7 +65,7 @@ static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
 
 	/* Don't allow flipping of max traces now */
 	local_irq_save(flags);
-	arch_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&buf->tr->max_lock);
 
 	cnt = ring_buffer_entries(buf->buffer);
 
@@ -83,7 +83,7 @@ static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
 			break;
 	}
 	tracing_on();
-	arch_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&buf->tr->max_lock);
 	local_irq_restore(flags);
 
 	if (count)
@@ -161,11 +161,6 @@ static struct ftrace_ops test_probe3 = {
 	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
-static struct ftrace_ops test_global = {
-	.func = trace_selftest_test_global_func,
-	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
-};
-
 static void print_counts(void)
 {
 	printk("(%d %d %d %d %d) ",
@@ -185,7 +180,7 @@ static void reset_counts(void)
 	trace_selftest_test_dyn_cnt = 0;
 }
 
-static int trace_selftest_ops(int cnt)
+static int trace_selftest_ops(struct trace_array *tr, int cnt)
 {
 	int save_ftrace_enabled = ftrace_enabled;
 	struct ftrace_ops *dyn_ops;
@@ -220,7 +215,11 @@ static int trace_selftest_ops(int cnt)
 	register_ftrace_function(&test_probe1);
 	register_ftrace_function(&test_probe2);
 	register_ftrace_function(&test_probe3);
-	register_ftrace_function(&test_global);
+	/* First time we are running with main function */
+	if (cnt > 1) {
+		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
+		register_ftrace_function(tr->ops);
+	}
 
 	DYN_FTRACE_TEST_NAME();
 
@@ -232,8 +231,10 @@ static int trace_selftest_ops(int cnt)
 		goto out;
 	if (trace_selftest_test_probe3_cnt != 1)
 		goto out;
-	if (trace_selftest_test_global_cnt == 0)
-		goto out;
+	if (cnt > 1) {
+		if (trace_selftest_test_global_cnt == 0)
+			goto out;
+	}
 
 	DYN_FTRACE_TEST_NAME2();
 
@@ -269,8 +270,10 @@ static int trace_selftest_ops(int cnt)
 		goto out_free;
 	if (trace_selftest_test_probe3_cnt != 3)
 		goto out_free;
-	if (trace_selftest_test_global_cnt == 0)
-		goto out;
+	if (cnt > 1) {
+		if (trace_selftest_test_global_cnt == 0)
+			goto out;
+	}
 	if (trace_selftest_test_dyn_cnt == 0)
 		goto out_free;
 
@@ -295,7 +298,9 @@ static int trace_selftest_ops(int cnt)
 	unregister_ftrace_function(&test_probe1);
 	unregister_ftrace_function(&test_probe2);
 	unregister_ftrace_function(&test_probe3);
-	unregister_ftrace_function(&test_global);
+	if (cnt > 1)
+		unregister_ftrace_function(tr->ops);
+	ftrace_reset_array_ops(tr);
 
 	/* Make sure everything is off */
 	reset_counts();
@@ -315,9 +320,9 @@ static int trace_selftest_ops(int cnt)
 }
 
 /* Test dynamic code modification and ftrace filters */
-int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
-					   struct trace_array *tr,
-					   int (*func)(void))
+static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
+						  struct trace_array *tr,
+						  int (*func)(void))
 {
 	int save_ftrace_enabled = ftrace_enabled;
 	unsigned long count;
@@ -388,7 +393,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 	}
 
 	/* Test the ops with global tracing running */
-	ret = trace_selftest_ops(1);
+	ret = trace_selftest_ops(tr, 1);
 	trace->reset(tr);
 
  out:
@@ -399,7 +404,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 
 	/* Test the ops with global tracing off */
 	if (!ret)
-		ret = trace_selftest_ops(2);
+		ret = trace_selftest_ops(tr, 2);
 
 	return ret;
 }
@@ -802,7 +807,7 @@ out:
 int
 trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 {
-	unsigned long save_max = tracing_max_latency;
+	unsigned long save_max = tr->max_latency;
 	unsigned long count;
 	int ret;
 
@@ -814,7 +819,7 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 	}
 
 	/* reset the max latency */
-	tracing_max_latency = 0;
+	tr->max_latency = 0;
 	/* disable interrupts for a bit */
 	local_irq_disable();
 	udelay(100);
@@ -841,7 +846,7 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 		ret = -1;
 	}
 
-	tracing_max_latency = save_max;
+	tr->max_latency = save_max;
 
 	return ret;
 }
@@ -851,7 +856,7 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 int
 trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 {
-	unsigned long save_max = tracing_max_latency;
+	unsigned long save_max = tr->max_latency;
 	unsigned long count;
 	int ret;
 
@@ -876,7 +881,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 	}
 
 	/* reset the max latency */
-	tracing_max_latency = 0;
+	tr->max_latency = 0;
 	/* disable preemption for a bit */
 	preempt_disable();
 	udelay(100);
@@ -903,7 +908,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 		ret = -1;
 	}
 
-	tracing_max_latency = save_max;
+	tr->max_latency = save_max;
 
 	return ret;
 }
@@ -913,7 +918,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 int
 trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
 {
-	unsigned long save_max = tracing_max_latency;
+	unsigned long save_max = tr->max_latency;
 	unsigned long count;
 	int ret;
 
@@ -938,7 +943,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 	}
 
 	/* reset the max latency */
-	tracing_max_latency = 0;
+	tr->max_latency = 0;
 
 	/* disable preemption and interrupts for a bit */
 	preempt_disable();
@@ -973,7 +978,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 	}
 
 	/* do the test by disabling interrupts first this time */
-	tracing_max_latency = 0;
+	tr->max_latency = 0;
 	tracing_start();
 	trace->start(tr);
 
@@ -1004,7 +1009,7 @@ out:
 	tracing_start();
 out_no_start:
 	trace->reset(tr);
-	tracing_max_latency = save_max;
+	tr->max_latency = save_max;
 
 	return ret;
 }
@@ -1057,7 +1062,7 @@ static int trace_wakeup_test_thread(void *data)
 int
 trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
 {
-	unsigned long save_max = tracing_max_latency;
+	unsigned long save_max = tr->max_latency;
 	struct task_struct *p;
 	struct completion is_ready;
 	unsigned long count;
@@ -1083,7 +1088,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
 	}
 
 	/* reset the max latency */
-	tracing_max_latency = 0;
+	tr->max_latency = 0;
 
 	while (p->on_rq) {
 		/*
@@ -1113,7 +1118,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
 	trace->reset(tr);
 	tracing_start();
 
-	tracing_max_latency = save_max;
+	tr->max_latency = save_max;
 
 	/* kill the thread */
 	kthread_stop(p);
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 21b320e5d163..8a4e5cb66a4c 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -51,11 +51,33 @@ static DEFINE_MUTEX(stack_sysctl_mutex);
 int stack_tracer_enabled;
 static int last_stack_tracer_enabled;
 
+static inline void print_max_stack(void)
+{
+	long i;
+	int size;
+
+	pr_emerg("        Depth    Size   Location    (%d entries)\n"
+		 "        -----    ----   --------\n",
+		 max_stack_trace.nr_entries - 1);
+
+	for (i = 0; i < max_stack_trace.nr_entries; i++) {
+		if (stack_dump_trace[i] == ULONG_MAX)
+			break;
+		if (i+1 == max_stack_trace.nr_entries ||
+		    stack_dump_trace[i+1] == ULONG_MAX)
+			size = stack_dump_index[i];
+		else
+			size = stack_dump_index[i] - stack_dump_index[i+1];
+
+		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_dump_index[i],
+			 size, (void *)stack_dump_trace[i]);
+	}
+}
+
 static inline void
 check_stack(unsigned long ip, unsigned long *stack)
 {
-	unsigned long this_size, flags;
-	unsigned long *p, *top, *start;
+	unsigned long this_size, flags; unsigned long *p, *top, *start;
 	static int tracer_frame;
 	int frame_size = ACCESS_ONCE(tracer_frame);
 	int i;
@@ -85,8 +107,12 @@ check_stack(unsigned long ip, unsigned long *stack)
 
 	max_stack_size = this_size;
 
 	max_stack_trace.nr_entries = 0;
-	max_stack_trace.skip = 3;
+
+	if (using_ftrace_ops_list_func())
+		max_stack_trace.skip = 4;
+	else
+		max_stack_trace.skip = 3;
 
 	save_stack_trace(&max_stack_trace);
 
@@ -145,8 +171,12 @@ check_stack(unsigned long ip, unsigned long *stack)
 		i++;
 	}
 
-	BUG_ON(current != &init_task &&
-		*(end_of_stack(current)) != STACK_END_MAGIC);
+	if ((current != &init_task &&
+			*(end_of_stack(current)) != STACK_END_MAGIC)) {
+		print_max_stack();
+		BUG();
+	}
+
 out:
 	arch_spin_unlock(&max_stack_lock);
 	local_irq_restore(flags);
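Note: the new skip-depth logic above relies on using_ftrace_ops_list_func(), a helper this series adds in kernel/trace/ftrace.c (not part of this hunk). It presumably reduces to a pointer comparison along these lines: when more than one ftrace_ops is registered, calls are dispatched through ftrace_ops_list_func(), which adds one extra stack frame that the stack tracer must skip.

	/* Sketch of the helper referenced above; the real definition
	 * lives in kernel/trace/ftrace.c. */
	int using_ftrace_ops_list_func(void)
	{
		return ftrace_trace_function == ftrace_ops_list_func;
	}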